diff --git a/clang/lib/CodeGen/CGOpenMPRuntime.cpp b/clang/lib/CodeGen/CGOpenMPRuntime.cpp
--- a/clang/lib/CodeGen/CGOpenMPRuntime.cpp
+++ b/clang/lib/CodeGen/CGOpenMPRuntime.cpp
@@ -29,6 +29,7 @@
 #include "clang/CodeGen/ConstantInitBuilder.h"
 #include "llvm/ADT/ArrayRef.h"
 #include "llvm/ADT/SetOperations.h"
+#include "llvm/ADT/SmallBitVector.h"
 #include "llvm/ADT/StringExtras.h"
 #include "llvm/Bitcode/BitcodeReader.h"
 #include "llvm/IR/Constants.h"
@@ -9575,12 +9576,6 @@
   if (Info.NumberOfPtrs) {
     // Detect if we have any capture size requiring runtime evaluation of the
     // size so that a constant array could be eventually used.
-    bool hasRuntimeEvaluationCaptureSize = false;
-    for (llvm::Value *S : CombinedInfo.Sizes)
-      if (!isa<llvm::ConstantInt>(S)) {
-        hasRuntimeEvaluationCaptureSize = true;
-        break;
-      }
 
     llvm::APInt PointerNumAP(32, Info.NumberOfPtrs, /*isSigned=*/true);
     QualType PointerArrayType = Ctx.getConstantArrayType(
@@ -9600,33 +9595,37 @@
     // need to fill up the arrays as we do for the pointers.
     QualType Int64Ty =
         Ctx.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1);
-    if (hasRuntimeEvaluationCaptureSize) {
+    SmallVector<llvm::Constant *> ConstSizes;
+    llvm::SmallBitVector RuntimeSizes(CombinedInfo.Sizes.size());
+    for (unsigned I = 0, E = CombinedInfo.Sizes.size(); I < E; ++I) {
+      if (IsNonContiguous &&
+          (CombinedInfo.Types[I] & MappableExprsHandler::OMP_MAP_NON_CONTIG)) {
+        ConstSizes.push_back(llvm::ConstantInt::get(
+            CGF.Int64Ty, CombinedInfo.NonContigInfo.Dims[I]));
+      } else {
+        if (auto *CI = dyn_cast<llvm::Constant>(CombinedInfo.Sizes[I]))
+          if (!isa<llvm::ConstantExpr>(CI) && !isa<llvm::GlobalValue>(CI)) {
+            ConstSizes.push_back(cast<llvm::Constant>(CombinedInfo.Sizes[I]));
+            continue;
+          }
+        ConstSizes.push_back(llvm::ConstantInt::get(CGF.Int64Ty, 0));
+        RuntimeSizes.set(I);
+      }
+    }
+
+    if (RuntimeSizes.all()) {
       QualType SizeArrayType = Ctx.getConstantArrayType(
           Int64Ty, PointerNumAP, nullptr, ArrayType::Normal,
           /*IndexTypeQuals=*/0);
       Info.SizesArray =
           CGF.CreateMemTemp(SizeArrayType, ".offload_sizes").getPointer();
     } else {
-      // We expect all the sizes to be constant, so we collect them to create
-      // a constant array.
-      SmallVector<llvm::Constant *> ConstSizes;
-      for (unsigned I = 0, E = CombinedInfo.Sizes.size(); I < E; ++I) {
-        if (IsNonContiguous &&
-            (CombinedInfo.Types[I] & MappableExprsHandler::OMP_MAP_NON_CONTIG)) {
-          ConstSizes.push_back(llvm::ConstantInt::get(
-              CGF.Int64Ty, CombinedInfo.NonContigInfo.Dims[I]));
-        } else {
-          ConstSizes.push_back(cast<llvm::Constant>(CombinedInfo.Sizes[I]));
-        }
-      }
-
       auto *SizesArrayInit = llvm::ConstantArray::get(
           llvm::ArrayType::get(CGM.Int64Ty, ConstSizes.size()), ConstSizes);
       std::string Name = CGM.getOpenMPRuntime().getName({"offload_sizes"});
       auto *SizesArrayGbl = new llvm::GlobalVariable(
-          CGM.getModule(), SizesArrayInit->getType(),
-          /*isConstant=*/true, llvm::GlobalValue::PrivateLinkage,
-          SizesArrayInit, Name);
+          CGM.getModule(), SizesArrayInit->getType(), !RuntimeSizes.any(),
+          llvm::GlobalValue::PrivateLinkage, SizesArrayInit, Name);
       SizesArrayGbl->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
       Info.SizesArray = SizesArrayGbl;
     }
@@ -9700,7 +9699,7 @@
       Address PAddr(P, Ctx.getTypeAlignInChars(Ctx.VoidPtrTy));
       CGF.Builder.CreateStore(PVal, PAddr);
 
-      if (hasRuntimeEvaluationCaptureSize) {
+      if (RuntimeSizes.test(I)) {
         llvm::Value *S = CGF.Builder.CreateConstInBoundsGEP2_32(
             llvm::ArrayType::get(CGM.Int64Ty, Info.NumberOfPtrs),
             Info.SizesArray,
diff --git a/clang/test/OpenMP/reduction_implicit_map.cpp b/clang/test/OpenMP/reduction_implicit_map.cpp
--- a/clang/test/OpenMP/reduction_implicit_map.cpp
+++ b/clang/test/OpenMP/reduction_implicit_map.cpp
@@ -1,3 +1,4 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
 // RUN: %clang_cc1 -verify -fopenmp -fopenmp-cuda-mode -x c++ \
 // RUN: -triple powerpc64le-unknown-unknown -DCUDA \
 // RUN: -fopenmp-targets=nvptx64-nvidia-cuda -emit-llvm-bc %s -o \
@@ -34,7 +35,6 @@
   return 0;
 }
 // CHECK-NOT @.offload_maptypes
-// CHECK: call void @__kmpc_nvptx_end_reduce_nowait(
 #elif defined(DIAG)
 class S2 {
   mutable int a;
@@ -57,11 +57,7 @@
   return 0;
 }
 // map for variable o
-// CHECK1: offload_sizes = private unnamed_addr constant [1 x i64] [i64 4]
-// CHECK1: offload_maptypes = private unnamed_addr constant [1 x i64] [i64 547]
 // map for b:
-// CHECK1: @.offload_sizes{{.*}} = private unnamed_addr constant [1 x i64] [i64 8000]
-// CHECK1: @.offload_maptypes{{.*}} = private unnamed_addr constant [1 x i64] [i64 547]
 #else
 
 // expected-no-diagnostics
@@ -86,25 +82,6 @@
   for (int i = 0; i < size; i++)
     ;
 }
-//CHECK2: @.offload_sizes = private unnamed_addr constant [2 x i64] [i64 4, i64 8]
-//CHECK2: @.offload_maptypes.10 = private unnamed_addr constant [2 x i64] [i64 800, i64 547]
-//CHECK2: @.offload_sizes.13 = private unnamed_addr constant [2 x i64] [i64 4, i64 4]
-//CHECK2: @.offload_maptypes.14 = private unnamed_addr constant [2 x i64] [i64 800, i64 547]
-//CHECK2: define dso_local void @_Z3sumPiiS_
-//CHECK2-NEXT: entry
-//CHECK2-NEXT: [[INP:%.*]] = alloca i32*
-//CHECK2-NEXT: [[SIZE:%.*]] = alloca i32
-//CHECK2-NEXT: [[OUTP:%.*]] = alloca i32*
-//CHECK2: [[OFFSIZE:%.*]] = alloca [3 x i64]
-//CHECK2: [[OFFSIZE10:%.*]] = alloca [3 x i64]
-//CHECK2: [[T15:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[OFFSIZE]], i32 0, i32 0
-//CHECK2-NEXT: store i64 4, i64* [[T15]]
-//CHECK2: [[T21:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[OFFSIZE]], i32 0, i32 1
-//CHECK2-NEXT: store i64 4, i64* [[T21]]
-//CHECK2: [[T53:%.*]] = 
getelementptr inbounds [3 x i64], [3 x i64]* [[OFFSIZE10]], i32 0, i32 0 -//CHECK2-NEXT: store i64 4, i64* [[T53]] -//CHECK2: [[T59:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[OFFSIZE10]], i32 0, i32 1 -//CHECK2-NEXT: store i64 12, i64* [[T59]] #endif int main() { @@ -120,3 +97,1987 @@ #endif return 0; } +// CHECK-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l32 +// CHECK-SAME: (double* noundef [[E:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: [[E_ADDR:%.*]] = alloca double*, align 8 +// CHECK-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [1 x i8*], align 8 +// CHECK-NEXT: store double* [[E]], double** [[E_ADDR]], align 8 +// CHECK-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_target_init(%struct.ident_t* @[[GLOB1:[0-9]+]], i8 2, i1 false, i1 true) +// CHECK-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1 +// CHECK-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]] +// CHECK: user_code.entry: +// CHECK-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]]) +// CHECK-NEXT: [[TMP2:%.*]] = load double*, double** [[E_ADDR]], align 8 +// CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[CAPTURED_VARS_ADDRS]], i64 0, i64 0 +// CHECK-NEXT: [[TMP4:%.*]] = bitcast double* [[TMP2]] to i8* +// CHECK-NEXT: store i8* [[TMP4]], i8** [[TMP3]], align 8 +// CHECK-NEXT: [[TMP5:%.*]] = bitcast [1 x i8*]* [[CAPTURED_VARS_ADDRS]] to i8** +// CHECK-NEXT: call void @__kmpc_parallel_51(%struct.ident_t* @[[GLOB2]], i32 [[TMP1]], i32 1, i32 -1, i32 -1, i8* bitcast (void (i32*, i32*, double*)* @__omp_outlined__ to i8*), i8* null, i8** [[TMP5]], i64 1) +// CHECK-NEXT: call void @__kmpc_target_deinit(%struct.ident_t* @[[GLOB1]], i8 2, i1 true) +// CHECK-NEXT: ret void +// CHECK: worker.exit: +// CHECK-NEXT: ret void +// +// +// CHECK-LABEL: define {{[^@]+}}@__omp_outlined__ +// CHECK-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], double* noundef [[E:%.*]]) #[[ATTR1:[0-9]+]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 +// CHECK-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 +// CHECK-NEXT: [[E_ADDR:%.*]] = alloca double*, align 8 +// CHECK-NEXT: [[E2:%.*]] = alloca double, align 8 +// CHECK-NEXT: [[TMP:%.*]] = alloca double*, align 8 +// CHECK-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8 +// CHECK-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 +// CHECK-NEXT: store double* [[E]], double** [[E_ADDR]], align 8 +// CHECK-NEXT: [[TMP0:%.*]] = load double*, double** [[E_ADDR]], align 8 +// CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, double* [[TMP0]], i64 0 +// CHECK-NEXT: [[TMP1:%.*]] = load double*, double** [[E_ADDR]], align 8 +// CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds double, double* [[TMP1]], i64 0 +// CHECK-NEXT: store double 0.000000e+00, double* [[E2]], align 8 +// CHECK-NEXT: [[TMP2:%.*]] = load double*, double** [[E_ADDR]], align 8 +// CHECK-NEXT: [[TMP3:%.*]] = ptrtoint double* [[TMP2]] to i64 +// CHECK-NEXT: [[TMP4:%.*]] = ptrtoint double* [[ARRAYIDX]] to i64 +// CHECK-NEXT: [[TMP5:%.*]] = sub i64 [[TMP3]], [[TMP4]] +// CHECK-NEXT: [[TMP6:%.*]] = sdiv exact i64 [[TMP5]], ptrtoint (double* getelementptr (double, double* null, i32 1) to i64) +// CHECK-NEXT: [[TMP7:%.*]] = getelementptr 
double, double* [[E2]], i64 [[TMP6]] +// CHECK-NEXT: store double* [[TMP7]], double** [[TMP]], align 8 +// CHECK-NEXT: [[TMP8:%.*]] = load double*, double** [[TMP]], align 8 +// CHECK-NEXT: store double 1.000000e+01, double* [[TMP8]], align 8 +// CHECK-NEXT: [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4 +// CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0 +// CHECK-NEXT: [[TMP12:%.*]] = bitcast double* [[E2]] to i8* +// CHECK-NEXT: store i8* [[TMP12]], i8** [[TMP11]], align 8 +// CHECK-NEXT: [[TMP13:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8* +// CHECK-NEXT: [[TMP14:%.*]] = call i32 @__kmpc_nvptx_parallel_reduce_nowait_v2(%struct.ident_t* @[[GLOB2]], i32 [[TMP10]], i32 1, i64 8, i8* [[TMP13]], void (i8*, i16, i16, i16)* @_omp_reduction_shuffle_and_reduce_func, void (i8*, i32)* @_omp_reduction_inter_warp_copy_func) +// CHECK-NEXT: [[TMP15:%.*]] = icmp eq i32 [[TMP14]], 1 +// CHECK-NEXT: br i1 [[TMP15]], label [[DOTOMP_REDUCTION_THEN:%.*]], label [[DOTOMP_REDUCTION_DONE:%.*]] +// CHECK: .omp.reduction.then: +// CHECK-NEXT: [[TMP16:%.*]] = load double, double* [[ARRAYIDX]], align 8 +// CHECK-NEXT: [[TMP17:%.*]] = load double, double* [[E2]], align 8 +// CHECK-NEXT: [[ADD:%.*]] = fadd double [[TMP16]], [[TMP17]] +// CHECK-NEXT: store double [[ADD]], double* [[ARRAYIDX]], align 8 +// CHECK-NEXT: call void @__kmpc_nvptx_end_reduce_nowait(i32 [[TMP10]]) +// CHECK-NEXT: br label [[DOTOMP_REDUCTION_DONE]] +// CHECK: .omp.reduction.done: +// CHECK-NEXT: ret void +// +// +// CHECK-LABEL: define {{[^@]+}}@_omp_reduction_shuffle_and_reduce_func +// CHECK-SAME: (i8* noundef [[TMP0:%.*]], i16 noundef signext [[TMP1:%.*]], i16 noundef signext [[TMP2:%.*]], i16 noundef signext [[TMP3:%.*]]) #[[ATTR2:[0-9]+]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8 +// CHECK-NEXT: [[DOTADDR1:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[DOTADDR2:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[DOTADDR3:%.*]] = alloca i16, align 2 +// CHECK-NEXT: [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST:%.*]] = alloca [1 x i8*], align 8 +// CHECK-NEXT: [[DOTOMP_REDUCTION_ELEMENT:%.*]] = alloca double, align 8 +// CHECK-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8 +// CHECK-NEXT: store i16 [[TMP1]], i16* [[DOTADDR1]], align 2 +// CHECK-NEXT: store i16 [[TMP2]], i16* [[DOTADDR2]], align 2 +// CHECK-NEXT: store i16 [[TMP3]], i16* [[DOTADDR3]], align 2 +// CHECK-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR]], align 8 +// CHECK-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]* +// CHECK-NEXT: [[TMP6:%.*]] = load i16, i16* [[DOTADDR1]], align 2 +// CHECK-NEXT: [[TMP7:%.*]] = load i16, i16* [[DOTADDR2]], align 2 +// CHECK-NEXT: [[TMP8:%.*]] = load i16, i16* [[DOTADDR3]], align 2 +// CHECK-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0 +// CHECK-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8 +// CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i64 0, i64 0 +// CHECK-NEXT: [[TMP12:%.*]] = bitcast i8* [[TMP10]] to double* +// CHECK-NEXT: [[TMP13:%.*]] = getelementptr double, double* [[TMP12]], i64 1 +// CHECK-NEXT: [[TMP14:%.*]] = bitcast double* [[TMP13]] to i8* +// CHECK-NEXT: [[TMP15:%.*]] = bitcast double* [[TMP12]] to i64* +// CHECK-NEXT: [[TMP16:%.*]] = bitcast double* [[DOTOMP_REDUCTION_ELEMENT]] to i64* +// 
CHECK-NEXT: [[TMP17:%.*]] = load i64, i64* [[TMP15]], align 8 +// CHECK-NEXT: [[TMP18:%.*]] = call i32 @__kmpc_get_warp_size() +// CHECK-NEXT: [[TMP19:%.*]] = trunc i32 [[TMP18]] to i16 +// CHECK-NEXT: [[TMP20:%.*]] = call i64 @__kmpc_shuffle_int64(i64 [[TMP17]], i16 [[TMP7]], i16 [[TMP19]]) +// CHECK-NEXT: store i64 [[TMP20]], i64* [[TMP16]], align 8 +// CHECK-NEXT: [[TMP21:%.*]] = getelementptr i64, i64* [[TMP15]], i64 1 +// CHECK-NEXT: [[TMP22:%.*]] = getelementptr i64, i64* [[TMP16]], i64 1 +// CHECK-NEXT: [[TMP23:%.*]] = bitcast double* [[DOTOMP_REDUCTION_ELEMENT]] to i8* +// CHECK-NEXT: store i8* [[TMP23]], i8** [[TMP11]], align 8 +// CHECK-NEXT: [[TMP24:%.*]] = icmp eq i16 [[TMP8]], 0 +// CHECK-NEXT: [[TMP25:%.*]] = icmp eq i16 [[TMP8]], 1 +// CHECK-NEXT: [[TMP26:%.*]] = icmp ult i16 [[TMP6]], [[TMP7]] +// CHECK-NEXT: [[TMP27:%.*]] = and i1 [[TMP25]], [[TMP26]] +// CHECK-NEXT: [[TMP28:%.*]] = icmp eq i16 [[TMP8]], 2 +// CHECK-NEXT: [[TMP29:%.*]] = and i16 [[TMP6]], 1 +// CHECK-NEXT: [[TMP30:%.*]] = icmp eq i16 [[TMP29]], 0 +// CHECK-NEXT: [[TMP31:%.*]] = and i1 [[TMP28]], [[TMP30]] +// CHECK-NEXT: [[TMP32:%.*]] = icmp sgt i16 [[TMP7]], 0 +// CHECK-NEXT: [[TMP33:%.*]] = and i1 [[TMP31]], [[TMP32]] +// CHECK-NEXT: [[TMP34:%.*]] = or i1 [[TMP24]], [[TMP27]] +// CHECK-NEXT: [[TMP35:%.*]] = or i1 [[TMP34]], [[TMP33]] +// CHECK-NEXT: br i1 [[TMP35]], label [[THEN:%.*]], label [[ELSE:%.*]] +// CHECK: then: +// CHECK-NEXT: [[TMP36:%.*]] = bitcast [1 x i8*]* [[TMP5]] to i8* +// CHECK-NEXT: [[TMP37:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]] to i8* +// CHECK-NEXT: call void @"_omp$reduction$reduction_func"(i8* [[TMP36]], i8* [[TMP37]]) #[[ATTR3:[0-9]+]] +// CHECK-NEXT: br label [[IFCONT:%.*]] +// CHECK: else: +// CHECK-NEXT: br label [[IFCONT]] +// CHECK: ifcont: +// CHECK-NEXT: [[TMP38:%.*]] = icmp eq i16 [[TMP8]], 1 +// CHECK-NEXT: [[TMP39:%.*]] = icmp uge i16 [[TMP6]], [[TMP7]] +// CHECK-NEXT: [[TMP40:%.*]] = and i1 [[TMP38]], [[TMP39]] +// CHECK-NEXT: br i1 [[TMP40]], label [[THEN4:%.*]], label [[ELSE5:%.*]] +// CHECK: then4: +// CHECK-NEXT: [[TMP41:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_REMOTE_REDUCE_LIST]], i64 0, i64 0 +// CHECK-NEXT: [[TMP42:%.*]] = load i8*, i8** [[TMP41]], align 8 +// CHECK-NEXT: [[TMP43:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0 +// CHECK-NEXT: [[TMP44:%.*]] = load i8*, i8** [[TMP43]], align 8 +// CHECK-NEXT: [[TMP45:%.*]] = bitcast i8* [[TMP42]] to double* +// CHECK-NEXT: [[TMP46:%.*]] = bitcast i8* [[TMP44]] to double* +// CHECK-NEXT: [[TMP47:%.*]] = load double, double* [[TMP45]], align 8 +// CHECK-NEXT: store double [[TMP47]], double* [[TMP46]], align 8 +// CHECK-NEXT: br label [[IFCONT6:%.*]] +// CHECK: else5: +// CHECK-NEXT: br label [[IFCONT6]] +// CHECK: ifcont6: +// CHECK-NEXT: ret void +// +// +// CHECK-LABEL: define {{[^@]+}}@_omp_reduction_inter_warp_copy_func +// CHECK-SAME: (i8* noundef [[TMP0:%.*]], i32 noundef [[TMP1:%.*]]) #[[ATTR2]] { +// CHECK-NEXT: entry: +// CHECK-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8 +// CHECK-NEXT: [[DOTADDR1:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[DOTCNT_ADDR:%.*]] = alloca i32, align 4 +// CHECK-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2]]) +// CHECK-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8 +// CHECK-NEXT: store i32 [[TMP1]], i32* [[DOTADDR1]], align 4 +// CHECK-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block() +// CHECK-NEXT: [[TMP4:%.*]] = call i32 
@__kmpc_get_hardware_thread_id_in_block() +// CHECK-NEXT: [[NVPTX_LANE_ID:%.*]] = and i32 [[TMP4]], 31 +// CHECK-NEXT: [[TMP5:%.*]] = call i32 @__kmpc_get_hardware_thread_id_in_block() +// CHECK-NEXT: [[NVPTX_WARP_ID:%.*]] = ashr i32 [[TMP5]], 5 +// CHECK-NEXT: [[TMP6:%.*]] = load i8*, i8** [[DOTADDR]], align 8 +// CHECK-NEXT: [[TMP7:%.*]] = bitcast i8* [[TMP6]] to [1 x i8*]* +// CHECK-NEXT: store i32 0, i32* [[DOTCNT_ADDR]], align 4 +// CHECK-NEXT: br label [[PRECOND:%.*]] +// CHECK: precond: +// CHECK-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTCNT_ADDR]], align 4 +// CHECK-NEXT: [[TMP9:%.*]] = icmp ult i32 [[TMP8]], 2 +// CHECK-NEXT: br i1 [[TMP9]], label [[BODY:%.*]], label [[EXIT:%.*]] +// CHECK: body: +// CHECK-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP2]]) +// CHECK-NEXT: [[WARP_MASTER:%.*]] = icmp eq i32 [[NVPTX_LANE_ID]], 0 +// CHECK-NEXT: br i1 [[WARP_MASTER]], label [[THEN:%.*]], label [[ELSE:%.*]] +// CHECK: then: +// CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP7]], i64 0, i64 0 +// CHECK-NEXT: [[TMP11:%.*]] = load i8*, i8** [[TMP10]], align 8 +// CHECK-NEXT: [[TMP12:%.*]] = bitcast i8* [[TMP11]] to i32* +// CHECK-NEXT: [[TMP13:%.*]] = getelementptr i32, i32* [[TMP12]], i32 [[TMP8]] +// CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds [32 x i32], [32 x i32] addrspace(3)* @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[NVPTX_WARP_ID]] +// CHECK-NEXT: [[TMP15:%.*]] = load i32, i32* [[TMP13]], align 4 +// CHECK-NEXT: store volatile i32 [[TMP15]], i32 addrspace(3)* [[TMP14]], align 4 +// CHECK-NEXT: br label [[IFCONT:%.*]] +// CHECK: else: +// CHECK-NEXT: br label [[IFCONT]] +// CHECK: ifcont: +// CHECK-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3]], i32 [[TMP2]]) +// CHECK-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTADDR1]], align 4 +// CHECK-NEXT: [[IS_ACTIVE_THREAD:%.*]] = icmp ult i32 [[TMP3]], [[TMP16]] +// CHECK-NEXT: br i1 [[IS_ACTIVE_THREAD]], label [[THEN2:%.*]], label [[ELSE3:%.*]] +// CHECK: then2: +// CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds [32 x i32], [32 x i32] addrspace(3)* @__openmp_nvptx_data_transfer_temporary_storage, i64 0, i32 [[TMP3]] +// CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP7]], i64 0, i64 0 +// CHECK-NEXT: [[TMP19:%.*]] = load i8*, i8** [[TMP18]], align 8 +// CHECK-NEXT: [[TMP20:%.*]] = bitcast i8* [[TMP19]] to i32* +// CHECK-NEXT: [[TMP21:%.*]] = getelementptr i32, i32* [[TMP20]], i32 [[TMP8]] +// CHECK-NEXT: [[TMP22:%.*]] = load volatile i32, i32 addrspace(3)* [[TMP17]], align 4 +// CHECK-NEXT: store i32 [[TMP22]], i32* [[TMP21]], align 4 +// CHECK-NEXT: br label [[IFCONT4:%.*]] +// CHECK: else3: +// CHECK-NEXT: br label [[IFCONT4]] +// CHECK: ifcont4: +// CHECK-NEXT: [[TMP23:%.*]] = add nsw i32 [[TMP8]], 1 +// CHECK-NEXT: store i32 [[TMP23]], i32* [[DOTCNT_ADDR]], align 4 +// CHECK-NEXT: br label [[PRECOND]] +// CHECK: exit: +// CHECK-NEXT: ret void +// +// +// CHECK1-LABEL: define {{[^@]+}}@_Z3barv +// CHECK1-SAME: () #[[ATTR0:[0-9]+]] { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: [[O:%.*]] = alloca [5 x %class.S2], align 4 +// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [1 x i8*], align 8 +// CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [1 x i8*], align 8 +// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [1 x i8*], align 8 +// CHECK1-NEXT: [[B:%.*]] = alloca [10 x [10 x [10 x double]]], align 8 +// CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS1:%.*]] = alloca [1 x i8*], align 8 +// CHECK1-NEXT: 
[[DOTOFFLOAD_PTRS2:%.*]] = alloca [1 x i8*], align 8 +// CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS3:%.*]] = alloca [1 x i8*], align 8 +// CHECK1-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [5 x %class.S2], [5 x %class.S2]* [[O]], i32 0, i32 0 +// CHECK1-NEXT: [[ARRAYCTOR_END:%.*]] = getelementptr inbounds [[CLASS_S2:%.*]], %class.S2* [[ARRAY_BEGIN]], i64 5 +// CHECK1-NEXT: br label [[ARRAYCTOR_LOOP:%.*]] +// CHECK1: arrayctor.loop: +// CHECK1-NEXT: [[ARRAYCTOR_CUR:%.*]] = phi %class.S2* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[ARRAYCTOR_NEXT:%.*]], [[ARRAYCTOR_LOOP]] ] +// CHECK1-NEXT: call void @_ZN2S2C1Ev(%class.S2* noundef nonnull align 4 dereferenceable(4) [[ARRAYCTOR_CUR]]) +// CHECK1-NEXT: [[ARRAYCTOR_NEXT]] = getelementptr inbounds [[CLASS_S2]], %class.S2* [[ARRAYCTOR_CUR]], i64 1 +// CHECK1-NEXT: [[ARRAYCTOR_DONE:%.*]] = icmp eq %class.S2* [[ARRAYCTOR_NEXT]], [[ARRAYCTOR_END]] +// CHECK1-NEXT: br i1 [[ARRAYCTOR_DONE]], label [[ARRAYCTOR_CONT:%.*]], label [[ARRAYCTOR_LOOP]] +// CHECK1: arrayctor.cont: +// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [5 x %class.S2], [5 x %class.S2]* [[O]], i64 0, i64 0 +// CHECK1-NEXT: [[TMP0:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP1:%.*]] = bitcast i8** [[TMP0]] to [5 x %class.S2]** +// CHECK1-NEXT: store [5 x %class.S2]* [[O]], [5 x %class.S2]** [[TMP1]], align 8 +// CHECK1-NEXT: [[TMP2:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP3:%.*]] = bitcast i8** [[TMP2]] to %class.S2** +// CHECK1-NEXT: store %class.S2* [[ARRAYIDX]], %class.S2** [[TMP3]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK1-NEXT: store i8* null, i8** [[TMP4]], align 8 +// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3barv_l50.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) +// CHECK1-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 +// CHECK1-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK1: omp_offload.failed: +// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3barv_l50([5 x %class.S2]* [[O]]) #[[ATTR8:[0-9]+]] +// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] +// CHECK1: omp_offload.cont: +// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to [10 x [10 x [10 x double]]]** +// CHECK1-NEXT: store [10 x [10 x [10 x double]]]* [[B]], [10 x [10 x [10 x double]]]** [[TMP10]], align 8 +// CHECK1-NEXT: [[TMP11:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to [10 x [10 x [10 x double]]]** +// CHECK1-NEXT: store [10 x [10 x [10 x double]]]* [[B]], [10 x [10 x [10 x double]]]** [[TMP12]], align 8 +// CHECK1-NEXT: [[TMP13:%.*]] = getelementptr inbounds [1 x 
i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS3]], i64 0, i64 0 +// CHECK1-NEXT: store i8* null, i8** [[TMP13]], align 8 +// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3barv_l55.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.3, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) +// CHECK1-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 +// CHECK1-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED4:%.*]], label [[OMP_OFFLOAD_CONT5:%.*]] +// CHECK1: omp_offload.failed4: +// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3barv_l55([10 x [10 x [10 x double]]]* [[B]]) #[[ATTR8]] +// CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT5]] +// CHECK1: omp_offload.cont5: +// CHECK1-NEXT: ret i32 0 +// +// +// CHECK1-LABEL: define {{[^@]+}}@_ZN2S2C1Ev +// CHECK1-SAME: (%class.S2* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat align 2 { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %class.S2*, align 8 +// CHECK1-NEXT: store %class.S2* [[THIS]], %class.S2** [[THIS_ADDR]], align 8 +// CHECK1-NEXT: [[THIS1:%.*]] = load %class.S2*, %class.S2** [[THIS_ADDR]], align 8 +// CHECK1-NEXT: call void @_ZN2S2C2Ev(%class.S2* noundef nonnull align 4 dereferenceable(4) [[THIS1]]) +// CHECK1-NEXT: ret void +// +// +// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3barv_l50 +// CHECK1-SAME: ([5 x %class.S2]* noundef nonnull align 4 dereferenceable(20) [[O:%.*]]) #[[ATTR2:[0-9]+]] { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: [[O_ADDR:%.*]] = alloca [5 x %class.S2]*, align 8 +// CHECK1-NEXT: store [5 x %class.S2]* [[O]], [5 x %class.S2]** [[O_ADDR]], align 8 +// CHECK1-NEXT: [[TMP0:%.*]] = load [5 x %class.S2]*, [5 x %class.S2]** [[O_ADDR]], align 8 +// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [5 x %class.S2]*)* @.omp_outlined. to void (i32*, i32*, ...)*), [5 x %class.S2]* [[TMP0]]) +// CHECK1-NEXT: ret void +// +// +// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined. 
+// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [5 x %class.S2]* noundef nonnull align 4 dereferenceable(20) [[O:%.*]]) #[[ATTR3:[0-9]+]] { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 +// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 +// CHECK1-NEXT: [[O_ADDR:%.*]] = alloca [5 x %class.S2]*, align 8 +// CHECK1-NEXT: [[O1:%.*]] = alloca [[CLASS_S2:%.*]], align 4 +// CHECK1-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 8 +// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 +// CHECK1-NEXT: store [5 x %class.S2]* [[O]], [5 x %class.S2]** [[O_ADDR]], align 8 +// CHECK1-NEXT: [[TMP0:%.*]] = load [5 x %class.S2]*, [5 x %class.S2]** [[O_ADDR]], align 8 +// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [5 x %class.S2], [5 x %class.S2]* [[TMP0]], i64 0, i64 0 +// CHECK1-NEXT: call void @_ZN2S2C1Ev(%class.S2* noundef nonnull align 4 dereferenceable(4) [[O1]]) +// CHECK1-NEXT: [[TMP1:%.*]] = bitcast [5 x %class.S2]* [[TMP0]] to %class.S2* +// CHECK1-NEXT: [[TMP2:%.*]] = ptrtoint %class.S2* [[TMP1]] to i64 +// CHECK1-NEXT: [[TMP3:%.*]] = ptrtoint %class.S2* [[ARRAYIDX]] to i64 +// CHECK1-NEXT: [[TMP4:%.*]] = sub i64 [[TMP2]], [[TMP3]] +// CHECK1-NEXT: [[TMP5:%.*]] = sdiv exact i64 [[TMP4]], ptrtoint (%class.S2* getelementptr ([[CLASS_S2]], %class.S2* null, i32 1) to i64) +// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr [[CLASS_S2]], %class.S2* [[O1]], i64 [[TMP5]] +// CHECK1-NEXT: [[TMP7:%.*]] = bitcast %class.S2* [[TMP6]] to [5 x %class.S2]* +// CHECK1-NEXT: store i32 0, i32* [[I]], align 4 +// CHECK1-NEXT: br label [[FOR_COND:%.*]] +// CHECK1: for.cond: +// CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[I]], align 4 +// CHECK1-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP8]], 10 +// CHECK1-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]] +// CHECK1: for.body: +// CHECK1-NEXT: br label [[FOR_INC:%.*]] +// CHECK1: for.inc: +// CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[I]], align 4 +// CHECK1-NEXT: [[INC:%.*]] = add nsw i32 [[TMP9]], 1 +// CHECK1-NEXT: store i32 [[INC]], i32* [[I]], align 4 +// CHECK1-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP5:![0-9]+]] +// CHECK1: for.end: +// CHECK1-NEXT: [[TMP10:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0 +// CHECK1-NEXT: [[TMP11:%.*]] = bitcast %class.S2* [[O1]] to i8* +// CHECK1-NEXT: store i8* [[TMP11]], i8** [[TMP10]], align 8 +// CHECK1-NEXT: [[TMP12:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP13:%.*]] = load i32, i32* [[TMP12]], align 4 +// CHECK1-NEXT: [[TMP14:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8* +// CHECK1-NEXT: [[TMP15:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP13]], i32 1, i64 8, i8* [[TMP14]], void (i8*, i8*)* @.omp.reduction.reduction_func, [8 x i32]* @.gomp_critical_user_.reduction.var) +// CHECK1-NEXT: switch i32 [[TMP15]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ +// CHECK1-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]] +// CHECK1-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] +// CHECK1-NEXT: ] +// CHECK1: .omp.reduction.case1: +// CHECK1-NEXT: [[CALL:%.*]] = call noundef nonnull align 4 dereferenceable(4) %class.S2* @_ZN2S2plERS_(%class.S2* noundef nonnull 
align 4 dereferenceable(4) [[ARRAYIDX]], %class.S2* noundef nonnull align 4 dereferenceable(4) [[O1]]) +// CHECK1-NEXT: [[TMP16:%.*]] = bitcast %class.S2* [[ARRAYIDX]] to i8* +// CHECK1-NEXT: [[TMP17:%.*]] = bitcast %class.S2* [[CALL]] to i8* +// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP16]], i8* align 4 [[TMP17]], i64 4, i1 false) +// CHECK1-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB1]], i32 [[TMP13]], [8 x i32]* @.gomp_critical_user_.reduction.var) +// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] +// CHECK1: .omp.reduction.case2: +// CHECK1-NEXT: [[TMP18:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP19:%.*]] = load i32, i32* [[TMP18]], align 4 +// CHECK1-NEXT: call void @__kmpc_critical(%struct.ident_t* @[[GLOB2]], i32 [[TMP19]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var) +// CHECK1-NEXT: [[CALL2:%.*]] = call noundef nonnull align 4 dereferenceable(4) %class.S2* @_ZN2S2plERS_(%class.S2* noundef nonnull align 4 dereferenceable(4) [[ARRAYIDX]], %class.S2* noundef nonnull align 4 dereferenceable(4) [[O1]]) +// CHECK1-NEXT: [[TMP20:%.*]] = bitcast %class.S2* [[ARRAYIDX]] to i8* +// CHECK1-NEXT: [[TMP21:%.*]] = bitcast %class.S2* [[CALL2]] to i8* +// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP20]], i8* align 4 [[TMP21]], i64 4, i1 false) +// CHECK1-NEXT: call void @__kmpc_end_critical(%struct.ident_t* @[[GLOB2]], i32 [[TMP19]], [8 x i32]* @.gomp_critical_user_.atomic_reduction.var) +// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] +// CHECK1: .omp.reduction.default: +// CHECK1-NEXT: ret void +// +// +// CHECK1-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func +// CHECK1-SAME: (i8* noundef [[TMP0:%.*]], i8* noundef [[TMP1:%.*]]) #[[ATTR4:[0-9]+]] { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8 +// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8 +// CHECK1-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8 +// CHECK1-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8 +// CHECK1-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8 +// CHECK1-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]* +// CHECK1-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8 +// CHECK1-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]* +// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i64 0, i64 0 +// CHECK1-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8 +// CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %class.S2* +// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i64 0, i64 0 +// CHECK1-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8 +// CHECK1-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to %class.S2* +// CHECK1-NEXT: [[CALL:%.*]] = call noundef nonnull align 4 dereferenceable(4) %class.S2* @_ZN2S2plERS_(%class.S2* noundef nonnull align 4 dereferenceable(4) [[TMP11]], %class.S2* noundef nonnull align 4 dereferenceable(4) [[TMP8]]) +// CHECK1-NEXT: [[TMP12:%.*]] = bitcast %class.S2* [[TMP11]] to i8* +// CHECK1-NEXT: [[TMP13:%.*]] = bitcast %class.S2* [[CALL]] to i8* +// CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP12]], i8* align 4 [[TMP13]], i64 4, i1 false) +// CHECK1-NEXT: ret void +// +// +// CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3barv_l55 +// CHECK1-SAME: ([10 x [10 x [10 x double]]]* noundef nonnull align 8 dereferenceable(8000) [[B:%.*]]) #[[ATTR2]] { +// CHECK1-NEXT: 
entry: +// CHECK1-NEXT: [[B_ADDR:%.*]] = alloca [10 x [10 x [10 x double]]]*, align 8 +// CHECK1-NEXT: store [10 x [10 x [10 x double]]]* [[B]], [10 x [10 x [10 x double]]]** [[B_ADDR]], align 8 +// CHECK1-NEXT: [[TMP0:%.*]] = load [10 x [10 x [10 x double]]]*, [10 x [10 x [10 x double]]]** [[B_ADDR]], align 8 +// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x [10 x [10 x double]]]*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), [10 x [10 x [10 x double]]]* [[TMP0]]) +// CHECK1-NEXT: ret void +// +// +// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..1 +// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x [10 x [10 x double]]]* noundef nonnull align 8 dereferenceable(8000) [[B:%.*]]) #[[ATTR3]] { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 +// CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 +// CHECK1-NEXT: [[B_ADDR:%.*]] = alloca [10 x [10 x [10 x double]]]*, align 8 +// CHECK1-NEXT: [[DOTOMP_IV:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[TMP:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[DOTOMP_LB:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[DOTOMP_UB:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[SAVED_STACK:%.*]] = alloca i8*, align 8 +// CHECK1-NEXT: [[__VLA_EXPR0:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[DOTRD_INPUT_:%.*]] = alloca [1 x %struct.kmp_taskred_input_t], align 8 +// CHECK1-NEXT: [[DOTTASK_RED_:%.*]] = alloca i8*, align 8 +// CHECK1-NEXT: [[I:%.*]] = alloca i64, align 8 +// CHECK1-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [2 x i8*], align 8 +// CHECK1-NEXT: [[ATOMIC_TEMP:%.*]] = alloca double, align 8 +// CHECK1-NEXT: [[_TMP30:%.*]] = alloca double, align 8 +// CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 +// CHECK1-NEXT: store [10 x [10 x [10 x double]]]* [[B]], [10 x [10 x [10 x double]]]** [[B_ADDR]], align 8 +// CHECK1-NEXT: [[TMP0:%.*]] = load [10 x [10 x [10 x double]]]*, [10 x [10 x [10 x double]]]** [[B_ADDR]], align 8 +// CHECK1-NEXT: store i64 0, i64* [[DOTOMP_LB]], align 8 +// CHECK1-NEXT: store i64 9, i64* [[DOTOMP_UB]], align 8 +// CHECK1-NEXT: store i64 1, i64* [[DOTOMP_STRIDE]], align 8 +// CHECK1-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 +// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x [10 x [10 x double]]], [10 x [10 x [10 x double]]]* [[TMP0]], i64 0, i64 0 +// CHECK1-NEXT: [[ARRAYDECAY:%.*]] = getelementptr inbounds [10 x [10 x double]], [10 x [10 x double]]* [[ARRAYIDX]], i64 0, i64 0 +// CHECK1-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYDECAY]], i64 2 +// CHECK1-NEXT: [[ARRAYDECAY2:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX1]], i64 0, i64 0 +// CHECK1-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds double, double* [[ARRAYDECAY2]], i64 1 +// CHECK1-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds [10 x [10 x [10 x double]]], [10 x [10 x [10 x double]]]* [[TMP0]], i64 0, i64 1 +// CHECK1-NEXT: [[ARRAYDECAY5:%.*]] = getelementptr inbounds [10 x [10 x double]], [10 x [10 x double]]* [[ARRAYIDX4]], i64 0, i64 0 +// CHECK1-NEXT: 
[[ARRAYIDX6:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYDECAY5]], i64 5 +// CHECK1-NEXT: [[ARRAYDECAY7:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX6]], i64 0, i64 0 +// CHECK1-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds double, double* [[ARRAYDECAY7]], i64 1 +// CHECK1-NEXT: [[TMP1:%.*]] = ptrtoint double* [[ARRAYIDX8]] to i64 +// CHECK1-NEXT: [[TMP2:%.*]] = ptrtoint double* [[ARRAYIDX3]] to i64 +// CHECK1-NEXT: [[TMP3:%.*]] = sub i64 [[TMP1]], [[TMP2]] +// CHECK1-NEXT: [[TMP4:%.*]] = sdiv exact i64 [[TMP3]], ptrtoint (double* getelementptr (double, double* null, i32 1) to i64) +// CHECK1-NEXT: [[TMP5:%.*]] = add nuw i64 [[TMP4]], 1 +// CHECK1-NEXT: [[TMP6:%.*]] = mul nuw i64 [[TMP5]], ptrtoint (double* getelementptr (double, double* null, i32 1) to i64) +// CHECK1-NEXT: [[TMP7:%.*]] = call i8* @llvm.stacksave() +// CHECK1-NEXT: store i8* [[TMP7]], i8** [[SAVED_STACK]], align 8 +// CHECK1-NEXT: [[VLA:%.*]] = alloca double, i64 [[TMP5]], align 8 +// CHECK1-NEXT: store i64 [[TMP5]], i64* [[__VLA_EXPR0]], align 8 +// CHECK1-NEXT: [[TMP8:%.*]] = getelementptr double, double* [[VLA]], i64 [[TMP5]] +// CHECK1-NEXT: [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq double* [[VLA]], [[TMP8]] +// CHECK1-NEXT: br i1 [[OMP_ARRAYINIT_ISEMPTY]], label [[OMP_ARRAYINIT_DONE:%.*]], label [[OMP_ARRAYINIT_BODY:%.*]] +// CHECK1: omp.arrayinit.body: +// CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi double* [ [[VLA]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYINIT_BODY]] ] +// CHECK1-NEXT: store double 0.000000e+00, double* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 8 +// CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr double, double* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1 +// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq double* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP8]] +// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYINIT_DONE]], label [[OMP_ARRAYINIT_BODY]] +// CHECK1: omp.arrayinit.done: +// CHECK1-NEXT: [[TMP9:%.*]] = bitcast [10 x [10 x [10 x double]]]* [[TMP0]] to double* +// CHECK1-NEXT: [[TMP10:%.*]] = ptrtoint double* [[TMP9]] to i64 +// CHECK1-NEXT: [[TMP11:%.*]] = ptrtoint double* [[ARRAYIDX3]] to i64 +// CHECK1-NEXT: [[TMP12:%.*]] = sub i64 [[TMP10]], [[TMP11]] +// CHECK1-NEXT: [[TMP13:%.*]] = sdiv exact i64 [[TMP12]], ptrtoint (double* getelementptr (double, double* null, i32 1) to i64) +// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr double, double* [[VLA]], i64 [[TMP13]] +// CHECK1-NEXT: [[TMP15:%.*]] = bitcast double* [[TMP14]] to [10 x [10 x [10 x double]]]* +// CHECK1-NEXT: [[DOTRD_INPUT_GEP_:%.*]] = getelementptr inbounds [1 x %struct.kmp_taskred_input_t], [1 x %struct.kmp_taskred_input_t]* [[DOTRD_INPUT_]], i64 0, i64 0 +// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T:%.*]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 0 +// CHECK1-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds [10 x [10 x [10 x double]]], [10 x [10 x [10 x double]]]* [[TMP0]], i64 0, i64 0 +// CHECK1-NEXT: [[ARRAYDECAY10:%.*]] = getelementptr inbounds [10 x [10 x double]], [10 x [10 x double]]* [[ARRAYIDX9]], i64 0, i64 0 +// CHECK1-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYDECAY10]], i64 2 +// CHECK1-NEXT: [[ARRAYDECAY12:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX11]], i64 0, i64 0 +// CHECK1-NEXT: [[ARRAYIDX13:%.*]] = getelementptr inbounds double, double* [[ARRAYDECAY12]], i64 1 +// 
CHECK1-NEXT: [[ARRAYIDX14:%.*]] = getelementptr inbounds [10 x [10 x [10 x double]]], [10 x [10 x [10 x double]]]* [[TMP0]], i64 0, i64 1 +// CHECK1-NEXT: [[ARRAYDECAY15:%.*]] = getelementptr inbounds [10 x [10 x double]], [10 x [10 x double]]* [[ARRAYIDX14]], i64 0, i64 0 +// CHECK1-NEXT: [[ARRAYIDX16:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYDECAY15]], i64 5 +// CHECK1-NEXT: [[ARRAYDECAY17:%.*]] = getelementptr inbounds [10 x double], [10 x double]* [[ARRAYIDX16]], i64 0, i64 0 +// CHECK1-NEXT: [[ARRAYIDX18:%.*]] = getelementptr inbounds double, double* [[ARRAYDECAY17]], i64 1 +// CHECK1-NEXT: [[TMP17:%.*]] = bitcast double* [[VLA]] to i8* +// CHECK1-NEXT: store i8* [[TMP17]], i8** [[TMP16]], align 8 +// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 1 +// CHECK1-NEXT: [[TMP19:%.*]] = bitcast double* [[ARRAYIDX13]] to i8* +// CHECK1-NEXT: store i8* [[TMP19]], i8** [[TMP18]], align 8 +// CHECK1-NEXT: [[TMP20:%.*]] = ptrtoint double* [[ARRAYIDX18]] to i64 +// CHECK1-NEXT: [[TMP21:%.*]] = ptrtoint double* [[ARRAYIDX13]] to i64 +// CHECK1-NEXT: [[TMP22:%.*]] = sub i64 [[TMP20]], [[TMP21]] +// CHECK1-NEXT: [[TMP23:%.*]] = sdiv exact i64 [[TMP22]], ptrtoint (double* getelementptr (double, double* null, i32 1) to i64) +// CHECK1-NEXT: [[TMP24:%.*]] = add nuw i64 [[TMP23]], 1 +// CHECK1-NEXT: [[TMP25:%.*]] = mul nuw i64 [[TMP24]], ptrtoint (double* getelementptr (double, double* null, i32 1) to i64) +// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 2 +// CHECK1-NEXT: store i64 [[TMP25]], i64* [[TMP26]], align 8 +// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 3 +// CHECK1-NEXT: store i8* bitcast (void (i8*, i8*)* @.red_init. to i8*), i8** [[TMP27]], align 8 +// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 4 +// CHECK1-NEXT: store i8* null, i8** [[TMP28]], align 8 +// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 5 +// CHECK1-NEXT: store i8* bitcast (void (i8*, i8*)* @.red_comb. 
to i8*), i8** [[TMP29]], align 8 +// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASKRED_INPUT_T]], %struct.kmp_taskred_input_t* [[DOTRD_INPUT_GEP_]], i32 0, i32 6 +// CHECK1-NEXT: store i32 1, i32* [[TMP30]], align 8 +// CHECK1-NEXT: [[TMP31:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP32:%.*]] = load i32, i32* [[TMP31]], align 4 +// CHECK1-NEXT: [[TMP33:%.*]] = bitcast [1 x %struct.kmp_taskred_input_t]* [[DOTRD_INPUT_]] to i8* +// CHECK1-NEXT: [[TMP34:%.*]] = call i8* @__kmpc_taskred_modifier_init(%struct.ident_t* @[[GLOB2]], i32 [[TMP32]], i32 1, i32 1, i8* [[TMP33]]) +// CHECK1-NEXT: store i8* [[TMP34]], i8** [[DOTTASK_RED_]], align 8 +// CHECK1-NEXT: [[TMP35:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP36:%.*]] = load i32, i32* [[TMP35]], align 4 +// CHECK1-NEXT: call void @__kmpc_for_static_init_8(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP36]], i32 34, i32* [[DOTOMP_IS_LAST]], i64* [[DOTOMP_LB]], i64* [[DOTOMP_UB]], i64* [[DOTOMP_STRIDE]], i64 1, i64 1) +// CHECK1-NEXT: [[TMP37:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 +// CHECK1-NEXT: [[CMP:%.*]] = icmp sgt i64 [[TMP37]], 9 +// CHECK1-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK1: cond.true: +// CHECK1-NEXT: br label [[COND_END:%.*]] +// CHECK1: cond.false: +// CHECK1-NEXT: [[TMP38:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 +// CHECK1-NEXT: br label [[COND_END]] +// CHECK1: cond.end: +// CHECK1-NEXT: [[COND:%.*]] = phi i64 [ 9, [[COND_TRUE]] ], [ [[TMP38]], [[COND_FALSE]] ] +// CHECK1-NEXT: store i64 [[COND]], i64* [[DOTOMP_UB]], align 8 +// CHECK1-NEXT: [[TMP39:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8 +// CHECK1-NEXT: store i64 [[TMP39]], i64* [[DOTOMP_IV]], align 8 +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK1: omp.inner.for.cond: +// CHECK1-NEXT: [[TMP40:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 +// CHECK1-NEXT: [[TMP41:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8 +// CHECK1-NEXT: [[CMP19:%.*]] = icmp sle i64 [[TMP40]], [[TMP41]] +// CHECK1-NEXT: br i1 [[CMP19]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]] +// CHECK1: omp.inner.for.cond.cleanup: +// CHECK1-NEXT: br label [[OMP_INNER_FOR_END:%.*]] +// CHECK1: omp.inner.for.body: +// CHECK1-NEXT: [[TMP42:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 +// CHECK1-NEXT: [[MUL:%.*]] = mul nsw i64 [[TMP42]], 1 +// CHECK1-NEXT: [[ADD:%.*]] = add nsw i64 0, [[MUL]] +// CHECK1-NEXT: store i64 [[ADD]], i64* [[I]], align 8 +// CHECK1-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] +// CHECK1: omp.body.continue: +// CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK1: omp.inner.for.inc: +// CHECK1-NEXT: [[TMP43:%.*]] = load i64, i64* [[DOTOMP_IV]], align 8 +// CHECK1-NEXT: [[ADD20:%.*]] = add nsw i64 [[TMP43]], 1 +// CHECK1-NEXT: store i64 [[ADD20]], i64* [[DOTOMP_IV]], align 8 +// CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK1: omp.inner.for.end: +// CHECK1-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK1: omp.loop.exit: +// CHECK1-NEXT: [[TMP44:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP45:%.*]] = load i32, i32* [[TMP44]], align 4 +// CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB3]], i32 [[TMP45]]) +// CHECK1-NEXT: [[TMP46:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP47:%.*]] = load i32, i32* [[TMP46]], align 4 +// CHECK1-NEXT: call void @__kmpc_task_reduction_modifier_fini(%struct.ident_t* 
@[[GLOB2]], i32 [[TMP47]], i32 1) +// CHECK1-NEXT: [[TMP48:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 0 +// CHECK1-NEXT: [[TMP49:%.*]] = bitcast double* [[VLA]] to i8* +// CHECK1-NEXT: store i8* [[TMP49]], i8** [[TMP48]], align 8 +// CHECK1-NEXT: [[TMP50:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i64 0, i64 1 +// CHECK1-NEXT: [[TMP51:%.*]] = inttoptr i64 [[TMP5]] to i8* +// CHECK1-NEXT: store i8* [[TMP51]], i8** [[TMP50]], align 8 +// CHECK1-NEXT: [[TMP52:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 +// CHECK1-NEXT: [[TMP53:%.*]] = load i32, i32* [[TMP52]], align 4 +// CHECK1-NEXT: [[TMP54:%.*]] = bitcast [2 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8* +// CHECK1-NEXT: [[TMP55:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB1]], i32 [[TMP53]], i32 1, i64 16, i8* [[TMP54]], void (i8*, i8*)* @.omp.reduction.reduction_func.2, [8 x i32]* @.gomp_critical_user_.reduction.var) +// CHECK1-NEXT: switch i32 [[TMP55]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ +// CHECK1-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]] +// CHECK1-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] +// CHECK1-NEXT: ] +// CHECK1: .omp.reduction.case1: +// CHECK1-NEXT: [[TMP56:%.*]] = getelementptr double, double* [[ARRAYIDX3]], i64 [[TMP5]] +// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq double* [[ARRAYIDX3]], [[TMP56]] +// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE25:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]] +// CHECK1: omp.arraycpy.body: +// CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi double* [ [[VLA]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] +// CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST21:%.*]] = phi double* [ [[ARRAYIDX3]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT23:%.*]], [[OMP_ARRAYCPY_BODY]] ] +// CHECK1-NEXT: [[TMP57:%.*]] = load double, double* [[OMP_ARRAYCPY_DESTELEMENTPAST21]], align 8 +// CHECK1-NEXT: [[TMP58:%.*]] = load double, double* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 8 +// CHECK1-NEXT: [[ADD22:%.*]] = fadd double [[TMP57]], [[TMP58]] +// CHECK1-NEXT: store double [[ADD22]], double* [[OMP_ARRAYCPY_DESTELEMENTPAST21]], align 8 +// CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT23]] = getelementptr double, double* [[OMP_ARRAYCPY_DESTELEMENTPAST21]], i32 1 +// CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr double, double* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1 +// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE24:%.*]] = icmp eq double* [[OMP_ARRAYCPY_DEST_ELEMENT23]], [[TMP56]] +// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE24]], label [[OMP_ARRAYCPY_DONE25]], label [[OMP_ARRAYCPY_BODY]] +// CHECK1: omp.arraycpy.done25: +// CHECK1-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB1]], i32 [[TMP53]], [8 x i32]* @.gomp_critical_user_.reduction.var) +// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] +// CHECK1: .omp.reduction.case2: +// CHECK1-NEXT: [[TMP59:%.*]] = getelementptr double, double* [[ARRAYIDX3]], i64 [[TMP5]] +// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY26:%.*]] = icmp eq double* [[ARRAYIDX3]], [[TMP59]] +// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY26]], label [[OMP_ARRAYCPY_DONE35:%.*]], label [[OMP_ARRAYCPY_BODY27:%.*]] +// CHECK1: omp.arraycpy.body27: +// CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST28:%.*]] = phi double* [ [[VLA]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT33:%.*]], [[ATOMIC_EXIT:%.*]] ] +// CHECK1-NEXT: 
[[OMP_ARRAYCPY_DESTELEMENTPAST29:%.*]] = phi double* [ [[ARRAYIDX3]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT32:%.*]], [[ATOMIC_EXIT]] ] +// CHECK1-NEXT: [[TMP60:%.*]] = load double, double* [[OMP_ARRAYCPY_SRCELEMENTPAST28]], align 8 +// CHECK1-NEXT: [[TMP61:%.*]] = bitcast double* [[OMP_ARRAYCPY_DESTELEMENTPAST29]] to i64* +// CHECK1-NEXT: [[ATOMIC_LOAD:%.*]] = load atomic i64, i64* [[TMP61]] monotonic, align 8 +// CHECK1-NEXT: br label [[ATOMIC_CONT:%.*]] +// CHECK1: atomic_cont: +// CHECK1-NEXT: [[TMP62:%.*]] = phi i64 [ [[ATOMIC_LOAD]], [[OMP_ARRAYCPY_BODY27]] ], [ [[TMP70:%.*]], [[ATOMIC_CONT]] ] +// CHECK1-NEXT: [[TMP63:%.*]] = bitcast double* [[ATOMIC_TEMP]] to i64* +// CHECK1-NEXT: [[TMP64:%.*]] = bitcast i64 [[TMP62]] to double +// CHECK1-NEXT: store double [[TMP64]], double* [[_TMP30]], align 8 +// CHECK1-NEXT: [[TMP65:%.*]] = load double, double* [[_TMP30]], align 8 +// CHECK1-NEXT: [[TMP66:%.*]] = load double, double* [[OMP_ARRAYCPY_SRCELEMENTPAST28]], align 8 +// CHECK1-NEXT: [[ADD31:%.*]] = fadd double [[TMP65]], [[TMP66]] +// CHECK1-NEXT: store double [[ADD31]], double* [[ATOMIC_TEMP]], align 8 +// CHECK1-NEXT: [[TMP67:%.*]] = load i64, i64* [[TMP63]], align 8 +// CHECK1-NEXT: [[TMP68:%.*]] = bitcast double* [[OMP_ARRAYCPY_DESTELEMENTPAST29]] to i64* +// CHECK1-NEXT: [[TMP69:%.*]] = cmpxchg i64* [[TMP68]], i64 [[TMP62]], i64 [[TMP67]] monotonic monotonic, align 8 +// CHECK1-NEXT: [[TMP70]] = extractvalue { i64, i1 } [[TMP69]], 0 +// CHECK1-NEXT: [[TMP71:%.*]] = extractvalue { i64, i1 } [[TMP69]], 1 +// CHECK1-NEXT: br i1 [[TMP71]], label [[ATOMIC_EXIT]], label [[ATOMIC_CONT]] +// CHECK1: atomic_exit: +// CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT32]] = getelementptr double, double* [[OMP_ARRAYCPY_DESTELEMENTPAST29]], i32 1 +// CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT33]] = getelementptr double, double* [[OMP_ARRAYCPY_SRCELEMENTPAST28]], i32 1 +// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE34:%.*]] = icmp eq double* [[OMP_ARRAYCPY_DEST_ELEMENT32]], [[TMP59]] +// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE34]], label [[OMP_ARRAYCPY_DONE35]], label [[OMP_ARRAYCPY_BODY27]] +// CHECK1: omp.arraycpy.done35: +// CHECK1-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] +// CHECK1: .omp.reduction.default: +// CHECK1-NEXT: [[TMP72:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK1-NEXT: call void @llvm.stackrestore(i8* [[TMP72]]) +// CHECK1-NEXT: ret void +// +// +// CHECK1-LABEL: define {{[^@]+}}@.red_init. 
+// CHECK1-SAME: (i8* noalias noundef [[TMP0:%.*]], i8* noalias noundef [[TMP1:%.*]]) #[[ATTR4]] { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8 +// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8 +// CHECK1-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8 +// CHECK1-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8 +// CHECK1-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8 +// CHECK1-NEXT: [[TMP3:%.*]] = load i64, i64* @{{reduction_size[.].+[.]}}, align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = bitcast i8* [[TMP2]] to double* +// CHECK1-NEXT: [[TMP5:%.*]] = getelementptr double, double* [[TMP4]], i64 [[TMP3]] +// CHECK1-NEXT: [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq double* [[TMP4]], [[TMP5]] +// CHECK1-NEXT: br i1 [[OMP_ARRAYINIT_ISEMPTY]], label [[OMP_ARRAYINIT_DONE:%.*]], label [[OMP_ARRAYINIT_BODY:%.*]] +// CHECK1: omp.arrayinit.body: +// CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi double* [ [[TMP4]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYINIT_BODY]] ] +// CHECK1-NEXT: store double 0.000000e+00, double* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 8 +// CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr double, double* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1 +// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq double* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP5]] +// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYINIT_DONE]], label [[OMP_ARRAYINIT_BODY]] +// CHECK1: omp.arrayinit.done: +// CHECK1-NEXT: ret void +// +// +// CHECK1-LABEL: define {{[^@]+}}@.red_comb. +// CHECK1-SAME: (i8* noundef [[TMP0:%.*]], i8* noundef [[TMP1:%.*]]) #[[ATTR4]] { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8 +// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8 +// CHECK1-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8 +// CHECK1-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8 +// CHECK1-NEXT: [[TMP2:%.*]] = load i64, i64* @{{reduction_size[.].+[.]}}, align 8 +// CHECK1-NEXT: [[TMP3:%.*]] = load i8*, i8** [[DOTADDR]], align 8 +// CHECK1-NEXT: [[TMP4:%.*]] = bitcast i8* [[TMP3]] to double* +// CHECK1-NEXT: [[TMP5:%.*]] = load i8*, i8** [[DOTADDR1]], align 8 +// CHECK1-NEXT: [[TMP6:%.*]] = bitcast i8* [[TMP5]] to double* +// CHECK1-NEXT: [[TMP7:%.*]] = getelementptr double, double* [[TMP4]], i64 [[TMP2]] +// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq double* [[TMP4]], [[TMP7]] +// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE2:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]] +// CHECK1: omp.arraycpy.body: +// CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi double* [ [[TMP6]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] +// CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi double* [ [[TMP4]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] +// CHECK1-NEXT: [[TMP8:%.*]] = load double, double* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 8 +// CHECK1-NEXT: [[TMP9:%.*]] = load double, double* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 8 +// CHECK1-NEXT: [[ADD:%.*]] = fadd double [[TMP8]], [[TMP9]] +// CHECK1-NEXT: store double [[ADD]], double* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 8 +// CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr double, double* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1 +// CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr double, double* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1 +// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq double* 
[[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP7]] +// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE2]], label [[OMP_ARRAYCPY_BODY]] +// CHECK1: omp.arraycpy.done2: +// CHECK1-NEXT: ret void +// +// +// CHECK1-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.2 +// CHECK1-SAME: (i8* noundef [[TMP0:%.*]], i8* noundef [[TMP1:%.*]]) #[[ATTR4]] { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8 +// CHECK1-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 8 +// CHECK1-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8 +// CHECK1-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 8 +// CHECK1-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 8 +// CHECK1-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [2 x i8*]* +// CHECK1-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 8 +// CHECK1-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [2 x i8*]* +// CHECK1-NEXT: [[TMP6:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP5]], i64 0, i64 0 +// CHECK1-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8 +// CHECK1-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to double* +// CHECK1-NEXT: [[TMP9:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP3]], i64 0, i64 0 +// CHECK1-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8 +// CHECK1-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to double* +// CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[TMP3]], i64 0, i64 1 +// CHECK1-NEXT: [[TMP13:%.*]] = load i8*, i8** [[TMP12]], align 8 +// CHECK1-NEXT: [[TMP14:%.*]] = ptrtoint i8* [[TMP13]] to i64 +// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr double, double* [[TMP11]], i64 [[TMP14]] +// CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq double* [[TMP11]], [[TMP15]] +// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE2:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]] +// CHECK1: omp.arraycpy.body: +// CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi double* [ [[TMP8]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] +// CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi double* [ [[TMP11]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] +// CHECK1-NEXT: [[TMP16:%.*]] = load double, double* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 8 +// CHECK1-NEXT: [[TMP17:%.*]] = load double, double* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 8 +// CHECK1-NEXT: [[ADD:%.*]] = fadd double [[TMP16]], [[TMP17]] +// CHECK1-NEXT: store double [[ADD]], double* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 8 +// CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr double, double* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1 +// CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr double, double* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1 +// CHECK1-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq double* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP15]] +// CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE2]], label [[OMP_ARRAYCPY_BODY]] +// CHECK1: omp.arraycpy.done2: +// CHECK1-NEXT: ret void +// +// +// CHECK1-LABEL: define {{[^@]+}}@_ZN2S2C2Ev +// CHECK1-SAME: (%class.S2* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %class.S2*, align 8 +// CHECK1-NEXT: store %class.S2* [[THIS]], %class.S2** [[THIS_ADDR]], align 8 +// CHECK1-NEXT: [[THIS1:%.*]] = load %class.S2*, %class.S2** [[THIS_ADDR]], align 8 +// CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds 
[[CLASS_S2:%.*]], %class.S2* [[THIS1]], i32 0, i32 0 +// CHECK1-NEXT: store i32 0, i32* [[A]], align 4 +// CHECK1-NEXT: ret void +// +// +// CHECK1-LABEL: define {{[^@]+}}@main +// CHECK1-SAME: () #[[ATTR10:[0-9]+]] { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: [[RETVAL:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: [[A:%.*]] = alloca i32, align 4 +// CHECK1-NEXT: store i32 0, i32* [[RETVAL]], align 4 +// CHECK1-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z3barv() +// CHECK1-NEXT: store i32 [[CALL]], i32* [[A]], align 4 +// CHECK1-NEXT: ret i32 0 +// +// +// CHECK1-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg +// CHECK1-SAME: () #[[ATTR11:[0-9]+]] { +// CHECK1-NEXT: entry: +// CHECK1-NEXT: call void @__tgt_register_requires(i64 1) +// CHECK1-NEXT: ret void +// +// +// CHECK2-LABEL: define {{[^@]+}}@_Z3sumPiiS_ +// CHECK2-SAME: (i32* noundef [[INPUT:%.*]], i32 noundef [[SIZE:%.*]], i32* noundef [[OUTPUT:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK2-NEXT: entry: +// CHECK2-NEXT: [[INPUT_ADDR:%.*]] = alloca i32*, align 4 +// CHECK2-NEXT: [[SIZE_ADDR:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[OUTPUT_ADDR:%.*]] = alloca i32*, align 4 +// CHECK2-NEXT: [[SIZE_CASTED:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4 +// CHECK2-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4 +// CHECK2-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4 +// CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[SIZE_CASTED4:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOFFLOAD_BASEPTRS7:%.*]] = alloca [3 x i8*], align 4 +// CHECK2-NEXT: [[DOTOFFLOAD_PTRS8:%.*]] = alloca [3 x i8*], align 4 +// CHECK2-NEXT: [[DOTOFFLOAD_MAPPERS9:%.*]] = alloca [3 x i8*], align 4 +// CHECK2-NEXT: [[_TMP10:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTCAPTURE_EXPR_11:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTCAPTURE_EXPR_12:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[A:%.*]] = alloca [10 x i32], align 4 +// CHECK2-NEXT: [[SIZE_CASTED19:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOFFLOAD_BASEPTRS21:%.*]] = alloca [2 x i8*], align 4 +// CHECK2-NEXT: [[DOTOFFLOAD_PTRS22:%.*]] = alloca [2 x i8*], align 4 +// CHECK2-NEXT: [[DOTOFFLOAD_MAPPERS23:%.*]] = alloca [2 x i8*], align 4 +// CHECK2-NEXT: [[SIZE_CASTED26:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOFFLOAD_BASEPTRS28:%.*]] = alloca [2 x i8*], align 4 +// CHECK2-NEXT: [[DOTOFFLOAD_PTRS29:%.*]] = alloca [2 x i8*], align 4 +// CHECK2-NEXT: [[DOTOFFLOAD_MAPPERS30:%.*]] = alloca [2 x i8*], align 4 +// CHECK2-NEXT: store i32* [[INPUT]], i32** [[INPUT_ADDR]], align 4 +// CHECK2-NEXT: store i32 [[SIZE]], i32* [[SIZE_ADDR]], align 4 +// CHECK2-NEXT: store i32* [[OUTPUT]], i32** [[OUTPUT_ADDR]], align 4 +// CHECK2-NEXT: [[TMP0:%.*]] = load i32, i32* [[SIZE_ADDR]], align 4 +// CHECK2-NEXT: store i32 [[TMP0]], i32* [[SIZE_CASTED]], align 4 +// CHECK2-NEXT: [[TMP1:%.*]] = load i32, i32* [[SIZE_CASTED]], align 4 +// CHECK2-NEXT: [[TMP2:%.*]] = load i32*, i32** [[OUTPUT_ADDR]], align 4 +// CHECK2-NEXT: [[TMP3:%.*]] = load i32*, i32** [[INPUT_ADDR]], align 4 +// CHECK2-NEXT: [[TMP4:%.*]] = load i32*, i32** [[OUTPUT_ADDR]], align 4 +// CHECK2-NEXT: [[TMP5:%.*]] = load i32*, i32** [[OUTPUT_ADDR]], align 4 +// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP5]], i32 0 +// CHECK2-NEXT: [[TMP6:%.*]] = load i32*, i32** [[INPUT_ADDR]], align 4 +// 
CHECK2-NEXT: [[TMP7:%.*]] = load i32*, i32** [[INPUT_ADDR]], align 4 +// CHECK2-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32* [[TMP7]], i32 0 +// CHECK2-NEXT: [[TMP8:%.*]] = load i32, i32* [[SIZE_ADDR]], align 4 +// CHECK2-NEXT: [[TMP9:%.*]] = mul nuw i32 [[TMP8]], 4 +// CHECK2-NEXT: [[TMP10:%.*]] = sext i32 [[TMP9]] to i64 +// CHECK2-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK2-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i32* +// CHECK2-NEXT: store i32 [[TMP1]], i32* [[TMP12]], align 4 +// CHECK2-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK2-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i32* +// CHECK2-NEXT: store i32 [[TMP1]], i32* [[TMP14]], align 4 +// CHECK2-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK2-NEXT: store i8* null, i8** [[TMP15]], align 4 +// CHECK2-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK2-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** +// CHECK2-NEXT: store i32* [[TMP4]], i32** [[TMP17]], align 4 +// CHECK2-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK2-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** +// CHECK2-NEXT: store i32* [[ARRAYIDX]], i32** [[TMP19]], align 4 +// CHECK2-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK2-NEXT: store i8* null, i8** [[TMP20]], align 4 +// CHECK2-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK2-NEXT: [[TMP22:%.*]] = bitcast i8** [[TMP21]] to i32** +// CHECK2-NEXT: store i32* [[TMP6]], i32** [[TMP22]], align 4 +// CHECK2-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 +// CHECK2-NEXT: [[TMP24:%.*]] = bitcast i8** [[TMP23]] to i32** +// CHECK2-NEXT: store i32* [[ARRAYIDX1]], i32** [[TMP24]], align 4 +// CHECK2-NEXT: store i64 [[TMP10]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 2), align 4 +// CHECK2-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK2-NEXT: store i8* null, i8** [[TMP25]], align 4 +// CHECK2-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK2-NEXT: [[TMP27:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK2-NEXT: [[TMP28:%.*]] = load i32, i32* [[SIZE_ADDR]], align 4 +// CHECK2-NEXT: store i32 [[TMP28]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK2-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK2-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP29]], 0 +// CHECK2-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 +// CHECK2-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1 +// CHECK2-NEXT: store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK2-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP30]], 1 +// CHECK2-NEXT: [[TMP31:%.*]] = zext i32 [[ADD]] to i64 +// CHECK2-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB4:[0-9]+]], i64 -1, i64 [[TMP31]]) +// CHECK2-NEXT: [[TMP32:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB4]], i64 -1, i8* 
@.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3sumPiiS__l69.region_id, i32 3, i8** [[TMP26]], i8** [[TMP27]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK2-NEXT: [[TMP33:%.*]] = icmp ne i32 [[TMP32]], 0 +// CHECK2-NEXT: br i1 [[TMP33]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK2: omp_offload.failed: +// CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3sumPiiS__l69(i32 [[TMP1]], i32* [[TMP2]], i32* [[TMP3]]) #[[ATTR2:[0-9]+]] +// CHECK2-NEXT: br label [[OMP_OFFLOAD_CONT]] +// CHECK2: omp_offload.cont: +// CHECK2-NEXT: [[TMP34:%.*]] = load i32, i32* [[SIZE_ADDR]], align 4 +// CHECK2-NEXT: store i32 [[TMP34]], i32* [[SIZE_CASTED4]], align 4 +// CHECK2-NEXT: [[TMP35:%.*]] = load i32, i32* [[SIZE_CASTED4]], align 4 +// CHECK2-NEXT: [[TMP36:%.*]] = load i32*, i32** [[OUTPUT_ADDR]], align 4 +// CHECK2-NEXT: [[TMP37:%.*]] = load i32*, i32** [[INPUT_ADDR]], align 4 +// CHECK2-NEXT: [[TMP38:%.*]] = load i32*, i32** [[OUTPUT_ADDR]], align 4 +// CHECK2-NEXT: [[TMP39:%.*]] = load i32*, i32** [[OUTPUT_ADDR]], align 4 +// CHECK2-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds i32, i32* [[TMP39]], i32 0 +// CHECK2-NEXT: [[TMP40:%.*]] = load i32*, i32** [[INPUT_ADDR]], align 4 +// CHECK2-NEXT: [[TMP41:%.*]] = load i32*, i32** [[INPUT_ADDR]], align 4 +// CHECK2-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, i32* [[TMP41]], i32 0 +// CHECK2-NEXT: [[TMP42:%.*]] = load i32, i32* [[SIZE_ADDR]], align 4 +// CHECK2-NEXT: [[TMP43:%.*]] = mul nuw i32 [[TMP42]], 4 +// CHECK2-NEXT: [[TMP44:%.*]] = sext i32 [[TMP43]] to i64 +// CHECK2-NEXT: [[TMP45:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0 +// CHECK2-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32* +// CHECK2-NEXT: store i32 [[TMP35]], i32* [[TMP46]], align 4 +// CHECK2-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0 +// CHECK2-NEXT: [[TMP48:%.*]] = bitcast i8** [[TMP47]] to i32* +// CHECK2-NEXT: store i32 [[TMP35]], i32* [[TMP48]], align 4 +// CHECK2-NEXT: [[TMP49:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS9]], i32 0, i32 0 +// CHECK2-NEXT: store i8* null, i8** [[TMP49]], align 4 +// CHECK2-NEXT: [[TMP50:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 1 +// CHECK2-NEXT: [[TMP51:%.*]] = bitcast i8** [[TMP50]] to i32** +// CHECK2-NEXT: store i32* [[TMP38]], i32** [[TMP51]], align 4 +// CHECK2-NEXT: [[TMP52:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 1 +// CHECK2-NEXT: [[TMP53:%.*]] = bitcast i8** [[TMP52]] to i32** +// CHECK2-NEXT: store i32* [[ARRAYIDX5]], i32** [[TMP53]], align 4 +// CHECK2-NEXT: [[TMP54:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS9]], i32 0, i32 1 +// CHECK2-NEXT: store i8* null, i8** [[TMP54]], align 4 +// CHECK2-NEXT: [[TMP55:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 2 +// CHECK2-NEXT: [[TMP56:%.*]] = bitcast i8** [[TMP55]] to i32** +// CHECK2-NEXT: store i32* [[TMP40]], i32** [[TMP56]], align 4 +// CHECK2-NEXT: [[TMP57:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 2 +// CHECK2-NEXT: [[TMP58:%.*]] = bitcast i8** [[TMP57]] to i32** +// CHECK2-NEXT: store i32* [[ARRAYIDX6]], i32** [[TMP58]], 
align 4 +// CHECK2-NEXT: store i64 [[TMP44]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.7, i32 0, i32 2), align 4 +// CHECK2-NEXT: [[TMP59:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS9]], i32 0, i32 2 +// CHECK2-NEXT: store i8* null, i8** [[TMP59]], align 4 +// CHECK2-NEXT: [[TMP60:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0 +// CHECK2-NEXT: [[TMP61:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0 +// CHECK2-NEXT: [[TMP62:%.*]] = load i32, i32* [[SIZE_ADDR]], align 4 +// CHECK2-NEXT: store i32 [[TMP62]], i32* [[DOTCAPTURE_EXPR_11]], align 4 +// CHECK2-NEXT: [[TMP63:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_11]], align 4 +// CHECK2-NEXT: [[SUB13:%.*]] = sub nsw i32 [[TMP63]], 0 +// CHECK2-NEXT: [[DIV14:%.*]] = sdiv i32 [[SUB13]], 1 +// CHECK2-NEXT: [[SUB15:%.*]] = sub nsw i32 [[DIV14]], 1 +// CHECK2-NEXT: store i32 [[SUB15]], i32* [[DOTCAPTURE_EXPR_12]], align 4 +// CHECK2-NEXT: [[TMP64:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_12]], align 4 +// CHECK2-NEXT: [[ADD16:%.*]] = add nsw i32 [[TMP64]], 1 +// CHECK2-NEXT: [[TMP65:%.*]] = zext i32 [[ADD16]] to i64 +// CHECK2-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB4]], i64 -1, i64 [[TMP65]]) +// CHECK2-NEXT: [[TMP66:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB4]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3sumPiiS__l73.region_id, i32 3, i8** [[TMP60]], i8** [[TMP61]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.7, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.8, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK2-NEXT: [[TMP67:%.*]] = icmp ne i32 [[TMP66]], 0 +// CHECK2-NEXT: br i1 [[TMP67]], label [[OMP_OFFLOAD_FAILED17:%.*]], label [[OMP_OFFLOAD_CONT18:%.*]] +// CHECK2: omp_offload.failed17: +// CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3sumPiiS__l73(i32 [[TMP35]], i32* [[TMP36]], i32* [[TMP37]]) #[[ATTR2]] +// CHECK2-NEXT: br label [[OMP_OFFLOAD_CONT18]] +// CHECK2: omp_offload.cont18: +// CHECK2-NEXT: [[TMP68:%.*]] = load i32, i32* [[SIZE_ADDR]], align 4 +// CHECK2-NEXT: store i32 [[TMP68]], i32* [[SIZE_CASTED19]], align 4 +// CHECK2-NEXT: [[TMP69:%.*]] = load i32, i32* [[SIZE_CASTED19]], align 4 +// CHECK2-NEXT: [[ARRAYIDX20:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[A]], i32 0, i32 0 +// CHECK2-NEXT: [[TMP70:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0 +// CHECK2-NEXT: [[TMP71:%.*]] = bitcast i8** [[TMP70]] to i32* +// CHECK2-NEXT: store i32 [[TMP69]], i32* [[TMP71]], align 4 +// CHECK2-NEXT: [[TMP72:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0 +// CHECK2-NEXT: [[TMP73:%.*]] = bitcast i8** [[TMP72]] to i32* +// CHECK2-NEXT: store i32 [[TMP69]], i32* [[TMP73]], align 4 +// CHECK2-NEXT: [[TMP74:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 0 +// CHECK2-NEXT: store i8* null, i8** [[TMP74]], align 4 +// CHECK2-NEXT: [[TMP75:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 1 +// CHECK2-NEXT: [[TMP76:%.*]] = bitcast i8** [[TMP75]] to [10 x i32]** +// CHECK2-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP76]], align 4 +// CHECK2-NEXT: [[TMP77:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 1 +// CHECK2-NEXT: 
[[TMP78:%.*]] = bitcast i8** [[TMP77]] to i32** +// CHECK2-NEXT: store i32* [[ARRAYIDX20]], i32** [[TMP78]], align 4 +// CHECK2-NEXT: [[TMP79:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 1 +// CHECK2-NEXT: store i8* null, i8** [[TMP79]], align 4 +// CHECK2-NEXT: [[TMP80:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0 +// CHECK2-NEXT: [[TMP81:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0 +// CHECK2-NEXT: [[TMP82:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB4]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3sumPiiS__l78.region_id, i32 2, i8** [[TMP80]], i8** [[TMP81]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.11, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) +// CHECK2-NEXT: [[TMP83:%.*]] = icmp ne i32 [[TMP82]], 0 +// CHECK2-NEXT: br i1 [[TMP83]], label [[OMP_OFFLOAD_FAILED24:%.*]], label [[OMP_OFFLOAD_CONT25:%.*]] +// CHECK2: omp_offload.failed24: +// CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3sumPiiS__l78(i32 [[TMP69]], [10 x i32]* [[A]]) #[[ATTR2]] +// CHECK2-NEXT: br label [[OMP_OFFLOAD_CONT25]] +// CHECK2: omp_offload.cont25: +// CHECK2-NEXT: [[TMP84:%.*]] = load i32, i32* [[SIZE_ADDR]], align 4 +// CHECK2-NEXT: store i32 [[TMP84]], i32* [[SIZE_CASTED26]], align 4 +// CHECK2-NEXT: [[TMP85:%.*]] = load i32, i32* [[SIZE_CASTED26]], align 4 +// CHECK2-NEXT: [[ARRAYIDX27:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[A]], i32 0, i32 3 +// CHECK2-NEXT: [[TMP86:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS28]], i32 0, i32 0 +// CHECK2-NEXT: [[TMP87:%.*]] = bitcast i8** [[TMP86]] to i32* +// CHECK2-NEXT: store i32 [[TMP85]], i32* [[TMP87]], align 4 +// CHECK2-NEXT: [[TMP88:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS29]], i32 0, i32 0 +// CHECK2-NEXT: [[TMP89:%.*]] = bitcast i8** [[TMP88]] to i32* +// CHECK2-NEXT: store i32 [[TMP85]], i32* [[TMP89]], align 4 +// CHECK2-NEXT: [[TMP90:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS30]], i32 0, i32 0 +// CHECK2-NEXT: store i8* null, i8** [[TMP90]], align 4 +// CHECK2-NEXT: [[TMP91:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS28]], i32 0, i32 1 +// CHECK2-NEXT: [[TMP92:%.*]] = bitcast i8** [[TMP91]] to [10 x i32]** +// CHECK2-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[TMP92]], align 4 +// CHECK2-NEXT: [[TMP93:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS29]], i32 0, i32 1 +// CHECK2-NEXT: [[TMP94:%.*]] = bitcast i8** [[TMP93]] to i32** +// CHECK2-NEXT: store i32* [[ARRAYIDX27]], i32** [[TMP94]], align 4 +// CHECK2-NEXT: [[TMP95:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_MAPPERS30]], i32 0, i32 1 +// CHECK2-NEXT: store i8* null, i8** [[TMP95]], align 4 +// CHECK2-NEXT: [[TMP96:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS28]], i32 0, i32 0 +// CHECK2-NEXT: [[TMP97:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS29]], i32 0, i32 0 +// CHECK2-NEXT: [[TMP98:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB4]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3sumPiiS__l81.region_id, i32 2, i8** [[TMP96]], i8** [[TMP97]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.15, i32 0, 
i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) +// CHECK2-NEXT: [[TMP99:%.*]] = icmp ne i32 [[TMP98]], 0 +// CHECK2-NEXT: br i1 [[TMP99]], label [[OMP_OFFLOAD_FAILED31:%.*]], label [[OMP_OFFLOAD_CONT32:%.*]] +// CHECK2: omp_offload.failed31: +// CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3sumPiiS__l81(i32 [[TMP85]], [10 x i32]* [[A]]) #[[ATTR2]] +// CHECK2-NEXT: br label [[OMP_OFFLOAD_CONT32]] +// CHECK2: omp_offload.cont32: +// CHECK2-NEXT: ret void +// +// +// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3sumPiiS__l69 +// CHECK2-SAME: (i32 noundef [[SIZE:%.*]], i32* noundef [[OUTPUT:%.*]], i32* noundef [[INPUT:%.*]]) #[[ATTR1:[0-9]+]] { +// CHECK2-NEXT: entry: +// CHECK2-NEXT: [[SIZE_ADDR:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[OUTPUT_ADDR:%.*]] = alloca i32*, align 4 +// CHECK2-NEXT: [[INPUT_ADDR:%.*]] = alloca i32*, align 4 +// CHECK2-NEXT: [[SIZE_CASTED:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: store i32 [[SIZE]], i32* [[SIZE_ADDR]], align 4 +// CHECK2-NEXT: store i32* [[OUTPUT]], i32** [[OUTPUT_ADDR]], align 4 +// CHECK2-NEXT: store i32* [[INPUT]], i32** [[INPUT_ADDR]], align 4 +// CHECK2-NEXT: [[TMP0:%.*]] = load i32, i32* [[SIZE_ADDR]], align 4 +// CHECK2-NEXT: store i32 [[TMP0]], i32* [[SIZE_CASTED]], align 4 +// CHECK2-NEXT: [[TMP1:%.*]] = load i32, i32* [[SIZE_CASTED]], align 4 +// CHECK2-NEXT: [[TMP2:%.*]] = load i32*, i32** [[OUTPUT_ADDR]], align 4 +// CHECK2-NEXT: [[TMP3:%.*]] = load i32*, i32** [[INPUT_ADDR]], align 4 +// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB4]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i32 [[TMP1]], i32* [[TMP2]], i32* [[TMP3]]) +// CHECK2-NEXT: ret void +// +// +// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined. 
+// CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[SIZE:%.*]], i32* noundef [[OUTPUT:%.*]], i32* noundef [[INPUT:%.*]]) #[[ATTR1]] { +// CHECK2-NEXT: entry: +// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 +// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 +// CHECK2-NEXT: [[SIZE_ADDR:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[OUTPUT_ADDR:%.*]] = alloca i32*, align 4 +// CHECK2-NEXT: [[INPUT_ADDR:%.*]] = alloca i32*, align 4 +// CHECK2-NEXT: [[OUTPUT1:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[TMP:%.*]] = alloca i32*, align 4 +// CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[_TMP2:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTCAPTURE_EXPR_3:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[I5:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[SIZE_CASTED:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 4 +// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 +// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 +// CHECK2-NEXT: store i32 [[SIZE]], i32* [[SIZE_ADDR]], align 4 +// CHECK2-NEXT: store i32* [[OUTPUT]], i32** [[OUTPUT_ADDR]], align 4 +// CHECK2-NEXT: store i32* [[INPUT]], i32** [[INPUT_ADDR]], align 4 +// CHECK2-NEXT: [[TMP0:%.*]] = load i32*, i32** [[OUTPUT_ADDR]], align 4 +// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP0]], i32 0 +// CHECK2-NEXT: store i32 0, i32* [[OUTPUT1]], align 4 +// CHECK2-NEXT: [[TMP1:%.*]] = load i32*, i32** [[OUTPUT_ADDR]], align 4 +// CHECK2-NEXT: [[TMP2:%.*]] = ptrtoint i32* [[TMP1]] to i64 +// CHECK2-NEXT: [[TMP3:%.*]] = ptrtoint i32* [[ARRAYIDX]] to i64 +// CHECK2-NEXT: [[TMP4:%.*]] = sub i64 [[TMP2]], [[TMP3]] +// CHECK2-NEXT: [[TMP5:%.*]] = sdiv exact i64 [[TMP4]], ptrtoint (i32* getelementptr (i32, i32* null, i32 1) to i64) +// CHECK2-NEXT: [[TMP6:%.*]] = getelementptr i32, i32* [[OUTPUT1]], i64 [[TMP5]] +// CHECK2-NEXT: store i32* [[TMP6]], i32** [[TMP]], align 4 +// CHECK2-NEXT: [[TMP7:%.*]] = load i32, i32* [[SIZE_ADDR]], align 4 +// CHECK2-NEXT: store i32 [[TMP7]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK2-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK2-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP8]], 0 +// CHECK2-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 +// CHECK2-NEXT: [[SUB4:%.*]] = sub nsw i32 [[DIV]], 1 +// CHECK2-NEXT: store i32 [[SUB4]], i32* [[DOTCAPTURE_EXPR_3]], align 4 +// CHECK2-NEXT: store i32 0, i32* [[I]], align 4 +// CHECK2-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK2-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP9]] +// CHECK2-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]] +// CHECK2: omp.precond.then: +// CHECK2-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4 +// CHECK2-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4 +// CHECK2-NEXT: store i32 [[TMP10]], i32* [[DOTOMP_COMB_UB]], align 4 +// CHECK2-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 +// CHECK2-NEXT: store i32 0, i32* 
[[DOTOMP_IS_LAST]], align 4 +// CHECK2-NEXT: [[TMP11:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 +// CHECK2-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4 +// CHECK2-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP12]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK2-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 +// CHECK2-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4 +// CHECK2-NEXT: [[CMP6:%.*]] = icmp sgt i32 [[TMP13]], [[TMP14]] +// CHECK2-NEXT: br i1 [[CMP6]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK2: cond.true: +// CHECK2-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4 +// CHECK2-NEXT: br label [[COND_END:%.*]] +// CHECK2: cond.false: +// CHECK2-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 +// CHECK2-NEXT: br label [[COND_END]] +// CHECK2: cond.end: +// CHECK2-NEXT: [[COND:%.*]] = phi i32 [ [[TMP15]], [[COND_TRUE]] ], [ [[TMP16]], [[COND_FALSE]] ] +// CHECK2-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4 +// CHECK2-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 +// CHECK2-NEXT: store i32 [[TMP17]], i32* [[DOTOMP_IV]], align 4 +// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK2: omp.inner.for.cond: +// CHECK2-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 +// CHECK2-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 +// CHECK2-NEXT: [[CMP7:%.*]] = icmp sle i32 [[TMP18]], [[TMP19]] +// CHECK2-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK2: omp.inner.for.body: +// CHECK2-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 +// CHECK2-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 +// CHECK2-NEXT: [[TMP22:%.*]] = load i32, i32* [[SIZE_ADDR]], align 4 +// CHECK2-NEXT: store i32 [[TMP22]], i32* [[SIZE_CASTED]], align 4 +// CHECK2-NEXT: [[TMP23:%.*]] = load i32, i32* [[SIZE_CASTED]], align 4 +// CHECK2-NEXT: [[TMP24:%.*]] = load i32*, i32** [[TMP]], align 4 +// CHECK2-NEXT: [[TMP25:%.*]] = load i32*, i32** [[INPUT_ADDR]], align 4 +// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB4]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32*, i32*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32 [[TMP20]], i32 [[TMP21]], i32 [[TMP23]], i32* [[TMP24]], i32* [[TMP25]]) +// CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK2: omp.inner.for.inc: +// CHECK2-NEXT: [[TMP26:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 +// CHECK2-NEXT: [[TMP27:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4 +// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP26]], [[TMP27]] +// CHECK2-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4 +// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK2: omp.inner.for.end: +// CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK2: omp.loop.exit: +// CHECK2-NEXT: [[TMP28:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 +// CHECK2-NEXT: [[TMP29:%.*]] = load i32, i32* [[TMP28]], align 4 +// CHECK2-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP29]]) +// CHECK2-NEXT: br label [[OMP_PRECOND_END]] +// CHECK2: omp.precond.end: +// CHECK2-NEXT: [[TMP30:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0 +// CHECK2-NEXT: [[TMP31:%.*]] = bitcast i32* [[OUTPUT1]] to i8* +// CHECK2-NEXT: store i8* [[TMP31]], i8** [[TMP30]], align 4 +// CHECK2-NEXT: [[TMP32:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 +// CHECK2-NEXT: [[TMP33:%.*]] = load i32, i32* [[TMP32]], align 4 +// CHECK2-NEXT: [[TMP34:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8* +// CHECK2-NEXT: [[TMP35:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP33]], i32 1, i32 4, i8* [[TMP34]], void (i8*, i8*)* @.omp.reduction.reduction_func.2, [8 x i32]* @.gomp_critical_user_.reduction.var) +// CHECK2-NEXT: switch i32 [[TMP35]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ +// CHECK2-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]] +// CHECK2-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] +// CHECK2-NEXT: ] +// CHECK2: .omp.reduction.case1: +// CHECK2-NEXT: [[TMP36:%.*]] = load i32, i32* [[ARRAYIDX]], align 4 +// CHECK2-NEXT: [[TMP37:%.*]] = load i32, i32* [[OUTPUT1]], align 4 +// CHECK2-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP36]], [[TMP37]] +// CHECK2-NEXT: store i32 [[ADD8]], i32* [[ARRAYIDX]], align 4 +// CHECK2-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB3]], i32 [[TMP33]], [8 x i32]* @.gomp_critical_user_.reduction.var) +// CHECK2-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] +// CHECK2: .omp.reduction.case2: +// CHECK2-NEXT: [[TMP38:%.*]] = load i32, i32* [[OUTPUT1]], align 4 +// CHECK2-NEXT: [[TMP39:%.*]] = atomicrmw add i32* [[ARRAYIDX]], i32 [[TMP38]] monotonic, align 4 +// CHECK2-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] +// CHECK2: .omp.reduction.default: +// CHECK2-NEXT: ret void +// +// +// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..1 +// CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32 noundef [[SIZE:%.*]], i32* noundef [[OUTPUT:%.*]], i32* noundef [[INPUT:%.*]]) #[[ATTR1]] { +// CHECK2-NEXT: entry: +// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 +// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 +// CHECK2-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[SIZE_ADDR:%.*]] = alloca i32, align 4 +// 
CHECK2-NEXT: [[OUTPUT_ADDR:%.*]] = alloca i32*, align 4 +// CHECK2-NEXT: [[INPUT_ADDR:%.*]] = alloca i32*, align 4 +// CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[OUTPUT3:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[_TMP4:%.*]] = alloca i32*, align 4 +// CHECK2-NEXT: [[I5:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 4 +// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 +// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 +// CHECK2-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4 +// CHECK2-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4 +// CHECK2-NEXT: store i32 [[SIZE]], i32* [[SIZE_ADDR]], align 4 +// CHECK2-NEXT: store i32* [[OUTPUT]], i32** [[OUTPUT_ADDR]], align 4 +// CHECK2-NEXT: store i32* [[INPUT]], i32** [[INPUT_ADDR]], align 4 +// CHECK2-NEXT: [[TMP0:%.*]] = load i32, i32* [[SIZE_ADDR]], align 4 +// CHECK2-NEXT: store i32 [[TMP0]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK2-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK2-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP1]], 0 +// CHECK2-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 +// CHECK2-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 +// CHECK2-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK2-NEXT: store i32 0, i32* [[I]], align 4 +// CHECK2-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK2-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP2]] +// CHECK2-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]] +// CHECK2: omp.precond.then: +// CHECK2-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 +// CHECK2-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK2-NEXT: store i32 [[TMP3]], i32* [[DOTOMP_UB]], align 4 +// CHECK2-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4 +// CHECK2-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4 +// CHECK2-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_LB]], align 4 +// CHECK2-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4 +// CHECK2-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 +// CHECK2-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 +// CHECK2-NEXT: [[TMP6:%.*]] = load i32*, i32** [[OUTPUT_ADDR]], align 4 +// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP6]], i32 0 +// CHECK2-NEXT: store i32 0, i32* [[OUTPUT3]], align 4 +// CHECK2-NEXT: [[TMP7:%.*]] = load i32*, i32** [[OUTPUT_ADDR]], align 4 +// CHECK2-NEXT: [[TMP8:%.*]] = ptrtoint i32* [[TMP7]] to i64 +// CHECK2-NEXT: [[TMP9:%.*]] = ptrtoint i32* [[ARRAYIDX]] to i64 +// CHECK2-NEXT: [[TMP10:%.*]] = sub i64 [[TMP8]], [[TMP9]] +// CHECK2-NEXT: [[TMP11:%.*]] = sdiv exact i64 [[TMP10]], ptrtoint (i32* getelementptr (i32, i32* null, i32 1) to i64) +// CHECK2-NEXT: [[TMP12:%.*]] = getelementptr i32, i32* [[OUTPUT3]], i64 [[TMP11]] +// CHECK2-NEXT: store i32* [[TMP12]], i32** [[_TMP4]], align 4 +// 
CHECK2-NEXT: [[TMP13:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 +// CHECK2-NEXT: [[TMP14:%.*]] = load i32, i32* [[TMP13]], align 4 +// CHECK2-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP14]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK2-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 +// CHECK2-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK2-NEXT: [[CMP6:%.*]] = icmp sgt i32 [[TMP15]], [[TMP16]] +// CHECK2-NEXT: br i1 [[CMP6]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK2: cond.true: +// CHECK2-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK2-NEXT: br label [[COND_END:%.*]] +// CHECK2: cond.false: +// CHECK2-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 +// CHECK2-NEXT: br label [[COND_END]] +// CHECK2: cond.end: +// CHECK2-NEXT: [[COND:%.*]] = phi i32 [ [[TMP17]], [[COND_TRUE]] ], [ [[TMP18]], [[COND_FALSE]] ] +// CHECK2-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 +// CHECK2-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 +// CHECK2-NEXT: store i32 [[TMP19]], i32* [[DOTOMP_IV]], align 4 +// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK2: omp.inner.for.cond: +// CHECK2-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 +// CHECK2-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 +// CHECK2-NEXT: [[CMP7:%.*]] = icmp sle i32 [[TMP20]], [[TMP21]] +// CHECK2-NEXT: br i1 [[CMP7]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK2: omp.inner.for.body: +// CHECK2-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 +// CHECK2-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP22]], 1 +// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] +// CHECK2-NEXT: store i32 [[ADD]], i32* [[I5]], align 4 +// CHECK2-NEXT: [[TMP23:%.*]] = load i32*, i32** [[INPUT_ADDR]], align 4 +// CHECK2-NEXT: [[TMP24:%.*]] = load i32, i32* [[I5]], align 4 +// CHECK2-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, i32* [[TMP23]], i32 [[TMP24]] +// CHECK2-NEXT: [[TMP25:%.*]] = load i32, i32* [[ARRAYIDX8]], align 4 +// CHECK2-NEXT: [[TMP26:%.*]] = load i32*, i32** [[_TMP4]], align 4 +// CHECK2-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds i32, i32* [[TMP26]], i32 0 +// CHECK2-NEXT: [[TMP27:%.*]] = load i32, i32* [[ARRAYIDX9]], align 4 +// CHECK2-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP27]], [[TMP25]] +// CHECK2-NEXT: store i32 [[ADD10]], i32* [[ARRAYIDX9]], align 4 +// CHECK2-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] +// CHECK2: omp.body.continue: +// CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK2: omp.inner.for.inc: +// CHECK2-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 +// CHECK2-NEXT: [[ADD11:%.*]] = add nsw i32 [[TMP28]], 1 +// CHECK2-NEXT: store i32 [[ADD11]], i32* [[DOTOMP_IV]], align 4 +// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK2: omp.inner.for.end: +// CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK2: omp.loop.exit: +// CHECK2-NEXT: [[TMP29:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 +// CHECK2-NEXT: [[TMP30:%.*]] = load i32, i32* [[TMP29]], align 4 +// CHECK2-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP30]]) +// CHECK2-NEXT: [[TMP31:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0 +// CHECK2-NEXT: [[TMP32:%.*]] = bitcast i32* [[OUTPUT3]] to i8* +// CHECK2-NEXT: 
store i8* [[TMP32]], i8** [[TMP31]], align 4 +// CHECK2-NEXT: [[TMP33:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 +// CHECK2-NEXT: [[TMP34:%.*]] = load i32, i32* [[TMP33]], align 4 +// CHECK2-NEXT: [[TMP35:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8* +// CHECK2-NEXT: [[TMP36:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB3]], i32 [[TMP34]], i32 1, i32 4, i8* [[TMP35]], void (i8*, i8*)* @.omp.reduction.reduction_func, [8 x i32]* @.gomp_critical_user_.reduction.var) +// CHECK2-NEXT: switch i32 [[TMP36]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ +// CHECK2-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]] +// CHECK2-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] +// CHECK2-NEXT: ] +// CHECK2: .omp.reduction.case1: +// CHECK2-NEXT: [[TMP37:%.*]] = load i32, i32* [[ARRAYIDX]], align 4 +// CHECK2-NEXT: [[TMP38:%.*]] = load i32, i32* [[OUTPUT3]], align 4 +// CHECK2-NEXT: [[ADD12:%.*]] = add nsw i32 [[TMP37]], [[TMP38]] +// CHECK2-NEXT: store i32 [[ADD12]], i32* [[ARRAYIDX]], align 4 +// CHECK2-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB3]], i32 [[TMP34]], [8 x i32]* @.gomp_critical_user_.reduction.var) +// CHECK2-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] +// CHECK2: .omp.reduction.case2: +// CHECK2-NEXT: [[TMP39:%.*]] = load i32, i32* [[OUTPUT3]], align 4 +// CHECK2-NEXT: [[TMP40:%.*]] = atomicrmw add i32* [[ARRAYIDX]], i32 [[TMP39]] monotonic, align 4 +// CHECK2-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] +// CHECK2: .omp.reduction.default: +// CHECK2-NEXT: br label [[OMP_PRECOND_END]] +// CHECK2: omp.precond.end: +// CHECK2-NEXT: ret void +// +// +// CHECK2-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func +// CHECK2-SAME: (i8* noundef [[TMP0:%.*]], i8* noundef [[TMP1:%.*]]) #[[ATTR3:[0-9]+]] { +// CHECK2-NEXT: entry: +// CHECK2-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 4 +// CHECK2-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 4 +// CHECK2-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 4 +// CHECK2-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 4 +// CHECK2-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 4 +// CHECK2-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]* +// CHECK2-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 4 +// CHECK2-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]* +// CHECK2-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i32 0, i32 0 +// CHECK2-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 4 +// CHECK2-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32* +// CHECK2-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i32 0, i32 0 +// CHECK2-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 4 +// CHECK2-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32* +// CHECK2-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4 +// CHECK2-NEXT: [[TMP13:%.*]] = load i32, i32* [[TMP8]], align 4 +// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] +// CHECK2-NEXT: store i32 [[ADD]], i32* [[TMP11]], align 4 +// CHECK2-NEXT: ret void +// +// +// CHECK2-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.2 +// CHECK2-SAME: (i8* noundef [[TMP0:%.*]], i8* noundef [[TMP1:%.*]]) #[[ATTR3]] { +// CHECK2-NEXT: entry: +// CHECK2-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 4 +// CHECK2-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 4 +// CHECK2-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 4 +// CHECK2-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 4 +// CHECK2-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], 
align 4 +// CHECK2-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]* +// CHECK2-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 4 +// CHECK2-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]* +// CHECK2-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i32 0, i32 0 +// CHECK2-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 4 +// CHECK2-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32* +// CHECK2-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i32 0, i32 0 +// CHECK2-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 4 +// CHECK2-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32* +// CHECK2-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4 +// CHECK2-NEXT: [[TMP13:%.*]] = load i32, i32* [[TMP8]], align 4 +// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] +// CHECK2-NEXT: store i32 [[ADD]], i32* [[TMP11]], align 4 +// CHECK2-NEXT: ret void +// +// +// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3sumPiiS__l73 +// CHECK2-SAME: (i32 noundef [[SIZE:%.*]], i32* noundef [[OUTPUT:%.*]], i32* noundef [[INPUT:%.*]]) #[[ATTR1]] { +// CHECK2-NEXT: entry: +// CHECK2-NEXT: [[SIZE_ADDR:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[OUTPUT_ADDR:%.*]] = alloca i32*, align 4 +// CHECK2-NEXT: [[INPUT_ADDR:%.*]] = alloca i32*, align 4 +// CHECK2-NEXT: [[SIZE_CASTED:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: store i32 [[SIZE]], i32* [[SIZE_ADDR]], align 4 +// CHECK2-NEXT: store i32* [[OUTPUT]], i32** [[OUTPUT_ADDR]], align 4 +// CHECK2-NEXT: store i32* [[INPUT]], i32** [[INPUT_ADDR]], align 4 +// CHECK2-NEXT: [[TMP0:%.*]] = load i32, i32* [[SIZE_ADDR]], align 4 +// CHECK2-NEXT: store i32 [[TMP0]], i32* [[SIZE_CASTED]], align 4 +// CHECK2-NEXT: [[TMP1:%.*]] = load i32, i32* [[SIZE_CASTED]], align 4 +// CHECK2-NEXT: [[TMP2:%.*]] = load i32*, i32** [[OUTPUT_ADDR]], align 4 +// CHECK2-NEXT: [[TMP3:%.*]] = load i32*, i32** [[INPUT_ADDR]], align 4 +// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB4]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32*, i32*)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32 [[TMP1]], i32* [[TMP2]], i32* [[TMP3]]) +// CHECK2-NEXT: ret void +// +// +// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..3 +// CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[SIZE:%.*]], i32* noundef [[OUTPUT:%.*]], i32* noundef [[INPUT:%.*]]) #[[ATTR1]] { +// CHECK2-NEXT: entry: +// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 +// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 +// CHECK2-NEXT: [[SIZE_ADDR:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[OUTPUT_ADDR:%.*]] = alloca i32*, align 4 +// CHECK2-NEXT: [[INPUT_ADDR:%.*]] = alloca i32*, align 4 +// CHECK2-NEXT: [[OUTPUT2:%.*]] = alloca [3 x i32], align 4 +// CHECK2-NEXT: [[TMP:%.*]] = alloca i32*, align 4 +// CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[_TMP3:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTCAPTURE_EXPR_4:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_COMB_LB:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_COMB_UB:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[I6:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[SIZE_CASTED:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 4 +// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 +// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 +// CHECK2-NEXT: store i32 [[SIZE]], i32* [[SIZE_ADDR]], align 4 +// CHECK2-NEXT: store i32* [[OUTPUT]], i32** [[OUTPUT_ADDR]], align 4 +// CHECK2-NEXT: store i32* [[INPUT]], i32** [[INPUT_ADDR]], align 4 +// CHECK2-NEXT: [[TMP0:%.*]] = load i32*, i32** [[OUTPUT_ADDR]], align 4 +// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP0]], i32 0 +// CHECK2-NEXT: [[TMP1:%.*]] = load i32*, i32** [[OUTPUT_ADDR]], align 4 +// CHECK2-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, i32* [[TMP1]], i32 2 +// CHECK2-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [3 x i32], [3 x i32]* [[OUTPUT2]], i32 0, i32 0 +// CHECK2-NEXT: [[TMP2:%.*]] = getelementptr i32, i32* [[ARRAY_BEGIN]], i32 3 +// CHECK2-NEXT: [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq i32* [[ARRAY_BEGIN]], [[TMP2]] +// CHECK2-NEXT: br i1 [[OMP_ARRAYINIT_ISEMPTY]], label [[OMP_ARRAYINIT_DONE:%.*]], label [[OMP_ARRAYINIT_BODY:%.*]] +// CHECK2: omp.arrayinit.body: +// CHECK2-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i32* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYINIT_BODY]] ] +// CHECK2-NEXT: store i32 0, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4 +// CHECK2-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1 +// CHECK2-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP2]] +// CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYINIT_DONE]], label [[OMP_ARRAYINIT_BODY]] +// CHECK2: omp.arrayinit.done: +// CHECK2-NEXT: [[TMP3:%.*]] = load i32*, i32** [[OUTPUT_ADDR]], align 4 +// CHECK2-NEXT: [[TMP4:%.*]] = ptrtoint i32* [[TMP3]] to i64 +// CHECK2-NEXT: [[TMP5:%.*]] = 
ptrtoint i32* [[ARRAYIDX]] to i64 +// CHECK2-NEXT: [[TMP6:%.*]] = sub i64 [[TMP4]], [[TMP5]] +// CHECK2-NEXT: [[TMP7:%.*]] = sdiv exact i64 [[TMP6]], ptrtoint (i32* getelementptr (i32, i32* null, i32 1) to i64) +// CHECK2-NEXT: [[TMP8:%.*]] = bitcast [3 x i32]* [[OUTPUT2]] to i32* +// CHECK2-NEXT: [[TMP9:%.*]] = getelementptr i32, i32* [[TMP8]], i64 [[TMP7]] +// CHECK2-NEXT: store i32* [[TMP9]], i32** [[TMP]], align 4 +// CHECK2-NEXT: [[RHS_BEGIN:%.*]] = bitcast [3 x i32]* [[OUTPUT2]] to i32* +// CHECK2-NEXT: [[TMP10:%.*]] = load i32, i32* [[SIZE_ADDR]], align 4 +// CHECK2-NEXT: store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK2-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK2-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP11]], 0 +// CHECK2-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 +// CHECK2-NEXT: [[SUB5:%.*]] = sub nsw i32 [[DIV]], 1 +// CHECK2-NEXT: store i32 [[SUB5]], i32* [[DOTCAPTURE_EXPR_4]], align 4 +// CHECK2-NEXT: store i32 0, i32* [[I]], align 4 +// CHECK2-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK2-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP12]] +// CHECK2-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]] +// CHECK2: omp.precond.then: +// CHECK2-NEXT: store i32 0, i32* [[DOTOMP_COMB_LB]], align 4 +// CHECK2-NEXT: [[TMP13:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_4]], align 4 +// CHECK2-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_COMB_UB]], align 4 +// CHECK2-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 +// CHECK2-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 +// CHECK2-NEXT: [[TMP14:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 +// CHECK2-NEXT: [[TMP15:%.*]] = load i32, i32* [[TMP14]], align 4 +// CHECK2-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP15]], i32 92, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_COMB_LB]], i32* [[DOTOMP_COMB_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK2-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 +// CHECK2-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_4]], align 4 +// CHECK2-NEXT: [[CMP7:%.*]] = icmp sgt i32 [[TMP16]], [[TMP17]] +// CHECK2-NEXT: br i1 [[CMP7]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK2: cond.true: +// CHECK2-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_4]], align 4 +// CHECK2-NEXT: br label [[COND_END:%.*]] +// CHECK2: cond.false: +// CHECK2-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 +// CHECK2-NEXT: br label [[COND_END]] +// CHECK2: cond.end: +// CHECK2-NEXT: [[COND:%.*]] = phi i32 [ [[TMP18]], [[COND_TRUE]] ], [ [[TMP19]], [[COND_FALSE]] ] +// CHECK2-NEXT: store i32 [[COND]], i32* [[DOTOMP_COMB_UB]], align 4 +// CHECK2-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 +// CHECK2-NEXT: store i32 [[TMP20]], i32* [[DOTOMP_IV]], align 4 +// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK2: omp.inner.for.cond: +// CHECK2-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 +// CHECK2-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 +// CHECK2-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP21]], [[TMP22]] +// CHECK2-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK2: omp.inner.for.body: +// CHECK2-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 +// CHECK2-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 +// CHECK2-NEXT: [[TMP25:%.*]] = load i32, i32* [[SIZE_ADDR]], align 4 +// 
CHECK2-NEXT: store i32 [[TMP25]], i32* [[SIZE_CASTED]], align 4 +// CHECK2-NEXT: [[TMP26:%.*]] = load i32, i32* [[SIZE_CASTED]], align 4 +// CHECK2-NEXT: [[TMP27:%.*]] = load i32*, i32** [[TMP]], align 4 +// CHECK2-NEXT: [[TMP28:%.*]] = load i32*, i32** [[INPUT_ADDR]], align 4 +// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB4]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32*, i32*)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i32 [[TMP23]], i32 [[TMP24]], i32 [[TMP26]], i32* [[TMP27]], i32* [[TMP28]]) +// CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK2: omp.inner.for.inc: +// CHECK2-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 +// CHECK2-NEXT: [[TMP30:%.*]] = load i32, i32* [[DOTOMP_STRIDE]], align 4 +// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], [[TMP30]] +// CHECK2-NEXT: store i32 [[ADD]], i32* [[DOTOMP_IV]], align 4 +// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK2: omp.inner.for.end: +// CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// CHECK2: omp.loop.exit: +// CHECK2-NEXT: [[TMP31:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 +// CHECK2-NEXT: [[TMP32:%.*]] = load i32, i32* [[TMP31]], align 4 +// CHECK2-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP32]]) +// CHECK2-NEXT: br label [[OMP_PRECOND_END]] +// CHECK2: omp.precond.end: +// CHECK2-NEXT: [[TMP33:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0 +// CHECK2-NEXT: [[TMP34:%.*]] = bitcast i32* [[RHS_BEGIN]] to i8* +// CHECK2-NEXT: store i8* [[TMP34]], i8** [[TMP33]], align 4 +// CHECK2-NEXT: [[TMP35:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 +// CHECK2-NEXT: [[TMP36:%.*]] = load i32, i32* [[TMP35]], align 4 +// CHECK2-NEXT: [[TMP37:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8* +// CHECK2-NEXT: [[TMP38:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB3]], i32 [[TMP36]], i32 1, i32 4, i8* [[TMP37]], void (i8*, i8*)* @.omp.reduction.reduction_func.6, [8 x i32]* @.gomp_critical_user_.reduction.var) +// CHECK2-NEXT: switch i32 [[TMP38]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ +// CHECK2-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]] +// CHECK2-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] +// CHECK2-NEXT: ] +// CHECK2: .omp.reduction.case1: +// CHECK2-NEXT: [[TMP39:%.*]] = getelementptr i32, i32* [[ARRAYIDX]], i32 3 +// CHECK2-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i32* [[ARRAYIDX]], [[TMP39]] +// CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE13:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]] +// CHECK2: omp.arraycpy.body: +// CHECK2-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i32* [ [[RHS_BEGIN]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] +// CHECK2-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST9:%.*]] = phi i32* [ [[ARRAYIDX]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT11:%.*]], [[OMP_ARRAYCPY_BODY]] ] +// CHECK2-NEXT: [[TMP40:%.*]] = load i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST9]], align 4 +// CHECK2-NEXT: [[TMP41:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 4 +// CHECK2-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP40]], [[TMP41]] +// CHECK2-NEXT: store i32 [[ADD10]], i32* [[OMP_ARRAYCPY_DESTELEMENTPAST9]], align 4 +// CHECK2-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT11]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST9]], i32 1 +// CHECK2-NEXT: 
[[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1 +// CHECK2-NEXT: [[OMP_ARRAYCPY_DONE12:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT11]], [[TMP39]] +// CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_DONE12]], label [[OMP_ARRAYCPY_DONE13]], label [[OMP_ARRAYCPY_BODY]] +// CHECK2: omp.arraycpy.done13: +// CHECK2-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB3]], i32 [[TMP36]], [8 x i32]* @.gomp_critical_user_.reduction.var) +// CHECK2-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] +// CHECK2: .omp.reduction.case2: +// CHECK2-NEXT: [[TMP42:%.*]] = getelementptr i32, i32* [[ARRAYIDX]], i32 3 +// CHECK2-NEXT: [[OMP_ARRAYCPY_ISEMPTY14:%.*]] = icmp eq i32* [[ARRAYIDX]], [[TMP42]] +// CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY14]], label [[OMP_ARRAYCPY_DONE21:%.*]], label [[OMP_ARRAYCPY_BODY15:%.*]] +// CHECK2: omp.arraycpy.body15: +// CHECK2-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST16:%.*]] = phi i32* [ [[RHS_BEGIN]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT19:%.*]], [[OMP_ARRAYCPY_BODY15]] ] +// CHECK2-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST17:%.*]] = phi i32* [ [[ARRAYIDX]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT18:%.*]], [[OMP_ARRAYCPY_BODY15]] ] +// CHECK2-NEXT: [[TMP43:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST16]], align 4 +// CHECK2-NEXT: [[TMP44:%.*]] = atomicrmw add i32* [[OMP_ARRAYCPY_DESTELEMENTPAST17]], i32 [[TMP43]] monotonic, align 4 +// CHECK2-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT18]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST17]], i32 1 +// CHECK2-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT19]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST16]], i32 1 +// CHECK2-NEXT: [[OMP_ARRAYCPY_DONE20:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT18]], [[TMP42]] +// CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_DONE20]], label [[OMP_ARRAYCPY_DONE21]], label [[OMP_ARRAYCPY_BODY15]] +// CHECK2: omp.arraycpy.done21: +// CHECK2-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] +// CHECK2: .omp.reduction.default: +// CHECK2-NEXT: ret void +// +// +// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..4 +// CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32 noundef [[SIZE:%.*]], i32* noundef [[OUTPUT:%.*]], i32* noundef [[INPUT:%.*]]) #[[ATTR1]] { +// CHECK2-NEXT: entry: +// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 +// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 +// CHECK2-NEXT: [[DOTPREVIOUS_LB__ADDR:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTPREVIOUS_UB__ADDR:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[SIZE_ADDR:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[OUTPUT_ADDR:%.*]] = alloca i32*, align 4 +// CHECK2-NEXT: [[INPUT_ADDR:%.*]] = alloca i32*, align 4 +// CHECK2-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[OUTPUT4:%.*]] = alloca [3 x i32], align 4 +// CHECK2-NEXT: [[_TMP5:%.*]] = alloca i32*, align 4 +// CHECK2-NEXT: [[I6:%.*]] = alloca i32, align 4 +// 
CHECK2-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 4 +// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 +// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 +// CHECK2-NEXT: store i32 [[DOTPREVIOUS_LB_]], i32* [[DOTPREVIOUS_LB__ADDR]], align 4 +// CHECK2-NEXT: store i32 [[DOTPREVIOUS_UB_]], i32* [[DOTPREVIOUS_UB__ADDR]], align 4 +// CHECK2-NEXT: store i32 [[SIZE]], i32* [[SIZE_ADDR]], align 4 +// CHECK2-NEXT: store i32* [[OUTPUT]], i32** [[OUTPUT_ADDR]], align 4 +// CHECK2-NEXT: store i32* [[INPUT]], i32** [[INPUT_ADDR]], align 4 +// CHECK2-NEXT: [[TMP0:%.*]] = load i32, i32* [[SIZE_ADDR]], align 4 +// CHECK2-NEXT: store i32 [[TMP0]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK2-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK2-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP1]], 0 +// CHECK2-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 +// CHECK2-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 +// CHECK2-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK2-NEXT: store i32 0, i32* [[I]], align 4 +// CHECK2-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK2-NEXT: [[CMP:%.*]] = icmp slt i32 0, [[TMP2]] +// CHECK2-NEXT: br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]] +// CHECK2: omp.precond.then: +// CHECK2-NEXT: store i32 0, i32* [[DOTOMP_LB]], align 4 +// CHECK2-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK2-NEXT: store i32 [[TMP3]], i32* [[DOTOMP_UB]], align 4 +// CHECK2-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTPREVIOUS_LB__ADDR]], align 4 +// CHECK2-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTPREVIOUS_UB__ADDR]], align 4 +// CHECK2-NEXT: store i32 [[TMP4]], i32* [[DOTOMP_LB]], align 4 +// CHECK2-NEXT: store i32 [[TMP5]], i32* [[DOTOMP_UB]], align 4 +// CHECK2-NEXT: store i32 1, i32* [[DOTOMP_STRIDE]], align 4 +// CHECK2-NEXT: store i32 0, i32* [[DOTOMP_IS_LAST]], align 4 +// CHECK2-NEXT: [[TMP6:%.*]] = load i32*, i32** [[OUTPUT_ADDR]], align 4 +// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[TMP6]], i32 0 +// CHECK2-NEXT: [[TMP7:%.*]] = load i32*, i32** [[OUTPUT_ADDR]], align 4 +// CHECK2-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds i32, i32* [[TMP7]], i32 2 +// CHECK2-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [3 x i32], [3 x i32]* [[OUTPUT4]], i32 0, i32 0 +// CHECK2-NEXT: [[TMP8:%.*]] = getelementptr i32, i32* [[ARRAY_BEGIN]], i32 3 +// CHECK2-NEXT: [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq i32* [[ARRAY_BEGIN]], [[TMP8]] +// CHECK2-NEXT: br i1 [[OMP_ARRAYINIT_ISEMPTY]], label [[OMP_ARRAYINIT_DONE:%.*]], label [[OMP_ARRAYINIT_BODY:%.*]] +// CHECK2: omp.arrayinit.body: +// CHECK2-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i32* [ [[ARRAY_BEGIN]], [[OMP_PRECOND_THEN]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYINIT_BODY]] ] +// CHECK2-NEXT: store i32 0, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4 +// CHECK2-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1 +// CHECK2-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP8]] +// CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYINIT_DONE]], label [[OMP_ARRAYINIT_BODY]] +// CHECK2: omp.arrayinit.done: +// CHECK2-NEXT: [[TMP9:%.*]] = load i32*, i32** [[OUTPUT_ADDR]], align 4 +// CHECK2-NEXT: [[TMP10:%.*]] = ptrtoint i32* [[TMP9]] to i64 +// CHECK2-NEXT: [[TMP11:%.*]] = ptrtoint i32* [[ARRAYIDX]] to i64 +// 
CHECK2-NEXT: [[TMP12:%.*]] = sub i64 [[TMP10]], [[TMP11]] +// CHECK2-NEXT: [[TMP13:%.*]] = sdiv exact i64 [[TMP12]], ptrtoint (i32* getelementptr (i32, i32* null, i32 1) to i64) +// CHECK2-NEXT: [[TMP14:%.*]] = bitcast [3 x i32]* [[OUTPUT4]] to i32* +// CHECK2-NEXT: [[TMP15:%.*]] = getelementptr i32, i32* [[TMP14]], i64 [[TMP13]] +// CHECK2-NEXT: store i32* [[TMP15]], i32** [[_TMP5]], align 4 +// CHECK2-NEXT: [[RHS_BEGIN:%.*]] = bitcast [3 x i32]* [[OUTPUT4]] to i32* +// CHECK2-NEXT: [[TMP16:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 +// CHECK2-NEXT: [[TMP17:%.*]] = load i32, i32* [[TMP16]], align 4 +// CHECK2-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP17]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1) +// CHECK2-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 +// CHECK2-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK2-NEXT: [[CMP7:%.*]] = icmp sgt i32 [[TMP18]], [[TMP19]] +// CHECK2-NEXT: br i1 [[CMP7]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]] +// CHECK2: cond.true: +// CHECK2-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK2-NEXT: br label [[COND_END:%.*]] +// CHECK2: cond.false: +// CHECK2-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 +// CHECK2-NEXT: br label [[COND_END]] +// CHECK2: cond.end: +// CHECK2-NEXT: [[COND:%.*]] = phi i32 [ [[TMP20]], [[COND_TRUE]] ], [ [[TMP21]], [[COND_FALSE]] ] +// CHECK2-NEXT: store i32 [[COND]], i32* [[DOTOMP_UB]], align 4 +// CHECK2-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4 +// CHECK2-NEXT: store i32 [[TMP22]], i32* [[DOTOMP_IV]], align 4 +// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] +// CHECK2: omp.inner.for.cond: +// CHECK2-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 +// CHECK2-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4 +// CHECK2-NEXT: [[CMP8:%.*]] = icmp sle i32 [[TMP23]], [[TMP24]] +// CHECK2-NEXT: br i1 [[CMP8]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] +// CHECK2: omp.inner.for.body: +// CHECK2-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 +// CHECK2-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP25]], 1 +// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]] +// CHECK2-NEXT: store i32 [[ADD]], i32* [[I6]], align 4 +// CHECK2-NEXT: [[TMP26:%.*]] = load i32*, i32** [[INPUT_ADDR]], align 4 +// CHECK2-NEXT: [[TMP27:%.*]] = load i32, i32* [[I6]], align 4 +// CHECK2-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds i32, i32* [[TMP26]], i32 [[TMP27]] +// CHECK2-NEXT: [[TMP28:%.*]] = load i32, i32* [[ARRAYIDX9]], align 4 +// CHECK2-NEXT: [[TMP29:%.*]] = load i32*, i32** [[_TMP5]], align 4 +// CHECK2-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds i32, i32* [[TMP29]], i32 0 +// CHECK2-NEXT: [[TMP30:%.*]] = load i32, i32* [[ARRAYIDX10]], align 4 +// CHECK2-NEXT: [[ADD11:%.*]] = add nsw i32 [[TMP30]], [[TMP28]] +// CHECK2-NEXT: store i32 [[ADD11]], i32* [[ARRAYIDX10]], align 4 +// CHECK2-NEXT: br label [[OMP_BODY_CONTINUE:%.*]] +// CHECK2: omp.body.continue: +// CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] +// CHECK2: omp.inner.for.inc: +// CHECK2-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 +// CHECK2-NEXT: [[ADD12:%.*]] = add nsw i32 [[TMP31]], 1 +// CHECK2-NEXT: store i32 [[ADD12]], i32* [[DOTOMP_IV]], align 4 +// CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]] +// CHECK2: omp.inner.for.end: +// CHECK2-NEXT: br label [[OMP_LOOP_EXIT:%.*]] +// 
CHECK2: omp.loop.exit: +// CHECK2-NEXT: [[TMP32:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 +// CHECK2-NEXT: [[TMP33:%.*]] = load i32, i32* [[TMP32]], align 4 +// CHECK2-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP33]]) +// CHECK2-NEXT: [[TMP34:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0 +// CHECK2-NEXT: [[TMP35:%.*]] = bitcast i32* [[RHS_BEGIN]] to i8* +// CHECK2-NEXT: store i8* [[TMP35]], i8** [[TMP34]], align 4 +// CHECK2-NEXT: [[TMP36:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 +// CHECK2-NEXT: [[TMP37:%.*]] = load i32, i32* [[TMP36]], align 4 +// CHECK2-NEXT: [[TMP38:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8* +// CHECK2-NEXT: [[TMP39:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB3]], i32 [[TMP37]], i32 1, i32 4, i8* [[TMP38]], void (i8*, i8*)* @.omp.reduction.reduction_func.5, [8 x i32]* @.gomp_critical_user_.reduction.var) +// CHECK2-NEXT: switch i32 [[TMP39]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ +// CHECK2-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]] +// CHECK2-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] +// CHECK2-NEXT: ] +// CHECK2: .omp.reduction.case1: +// CHECK2-NEXT: [[TMP40:%.*]] = getelementptr i32, i32* [[ARRAYIDX]], i32 3 +// CHECK2-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i32* [[ARRAYIDX]], [[TMP40]] +// CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE17:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]] +// CHECK2: omp.arraycpy.body: +// CHECK2-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i32* [ [[RHS_BEGIN]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] +// CHECK2-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST13:%.*]] = phi i32* [ [[ARRAYIDX]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT15:%.*]], [[OMP_ARRAYCPY_BODY]] ] +// CHECK2-NEXT: [[TMP41:%.*]] = load i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST13]], align 4 +// CHECK2-NEXT: [[TMP42:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 4 +// CHECK2-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP41]], [[TMP42]] +// CHECK2-NEXT: store i32 [[ADD14]], i32* [[OMP_ARRAYCPY_DESTELEMENTPAST13]], align 4 +// CHECK2-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT15]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST13]], i32 1 +// CHECK2-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1 +// CHECK2-NEXT: [[OMP_ARRAYCPY_DONE16:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT15]], [[TMP40]] +// CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_DONE16]], label [[OMP_ARRAYCPY_DONE17]], label [[OMP_ARRAYCPY_BODY]] +// CHECK2: omp.arraycpy.done17: +// CHECK2-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB3]], i32 [[TMP37]], [8 x i32]* @.gomp_critical_user_.reduction.var) +// CHECK2-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] +// CHECK2: .omp.reduction.case2: +// CHECK2-NEXT: [[TMP43:%.*]] = getelementptr i32, i32* [[ARRAYIDX]], i32 3 +// CHECK2-NEXT: [[OMP_ARRAYCPY_ISEMPTY18:%.*]] = icmp eq i32* [[ARRAYIDX]], [[TMP43]] +// CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY18]], label [[OMP_ARRAYCPY_DONE25:%.*]], label [[OMP_ARRAYCPY_BODY19:%.*]] +// CHECK2: omp.arraycpy.body19: +// CHECK2-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST20:%.*]] = phi i32* [ [[RHS_BEGIN]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT23:%.*]], [[OMP_ARRAYCPY_BODY19]] ] +// CHECK2-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST21:%.*]] = phi i32* [ [[ARRAYIDX]], [[DOTOMP_REDUCTION_CASE2]] ], [ 
[[OMP_ARRAYCPY_DEST_ELEMENT22:%.*]], [[OMP_ARRAYCPY_BODY19]] ] +// CHECK2-NEXT: [[TMP44:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST20]], align 4 +// CHECK2-NEXT: [[TMP45:%.*]] = atomicrmw add i32* [[OMP_ARRAYCPY_DESTELEMENTPAST21]], i32 [[TMP44]] monotonic, align 4 +// CHECK2-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT22]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST21]], i32 1 +// CHECK2-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT23]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST20]], i32 1 +// CHECK2-NEXT: [[OMP_ARRAYCPY_DONE24:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT22]], [[TMP43]] +// CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_DONE24]], label [[OMP_ARRAYCPY_DONE25]], label [[OMP_ARRAYCPY_BODY19]] +// CHECK2: omp.arraycpy.done25: +// CHECK2-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] +// CHECK2: .omp.reduction.default: +// CHECK2-NEXT: br label [[OMP_PRECOND_END]] +// CHECK2: omp.precond.end: +// CHECK2-NEXT: ret void +// +// +// CHECK2-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.5 +// CHECK2-SAME: (i8* noundef [[TMP0:%.*]], i8* noundef [[TMP1:%.*]]) #[[ATTR3]] { +// CHECK2-NEXT: entry: +// CHECK2-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 4 +// CHECK2-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 4 +// CHECK2-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 4 +// CHECK2-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 4 +// CHECK2-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 4 +// CHECK2-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]* +// CHECK2-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 4 +// CHECK2-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]* +// CHECK2-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i32 0, i32 0 +// CHECK2-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 4 +// CHECK2-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32* +// CHECK2-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i32 0, i32 0 +// CHECK2-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 4 +// CHECK2-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32* +// CHECK2-NEXT: [[TMP12:%.*]] = getelementptr i32, i32* [[TMP11]], i32 3 +// CHECK2-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i32* [[TMP11]], [[TMP12]] +// CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE2:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]] +// CHECK2: omp.arraycpy.body: +// CHECK2-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i32* [ [[TMP8]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] +// CHECK2-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i32* [ [[TMP11]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] +// CHECK2-NEXT: [[TMP13:%.*]] = load i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4 +// CHECK2-NEXT: [[TMP14:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 4 +// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP14]] +// CHECK2-NEXT: store i32 [[ADD]], i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4 +// CHECK2-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1 +// CHECK2-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1 +// CHECK2-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP12]] +// CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE2]], label [[OMP_ARRAYCPY_BODY]] +// CHECK2: omp.arraycpy.done2: +// CHECK2-NEXT: ret void +// +// +// CHECK2-LABEL: define 
{{[^@]+}}@.omp.reduction.reduction_func.6 +// CHECK2-SAME: (i8* noundef [[TMP0:%.*]], i8* noundef [[TMP1:%.*]]) #[[ATTR3]] { +// CHECK2-NEXT: entry: +// CHECK2-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 4 +// CHECK2-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 4 +// CHECK2-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 4 +// CHECK2-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 4 +// CHECK2-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 4 +// CHECK2-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]* +// CHECK2-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 4 +// CHECK2-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]* +// CHECK2-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i32 0, i32 0 +// CHECK2-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 4 +// CHECK2-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32* +// CHECK2-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i32 0, i32 0 +// CHECK2-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 4 +// CHECK2-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32* +// CHECK2-NEXT: [[TMP12:%.*]] = getelementptr i32, i32* [[TMP11]], i32 3 +// CHECK2-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i32* [[TMP11]], [[TMP12]] +// CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE2:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]] +// CHECK2: omp.arraycpy.body: +// CHECK2-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i32* [ [[TMP8]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] +// CHECK2-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i32* [ [[TMP11]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] +// CHECK2-NEXT: [[TMP13:%.*]] = load i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4 +// CHECK2-NEXT: [[TMP14:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 4 +// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP14]] +// CHECK2-NEXT: store i32 [[ADD]], i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4 +// CHECK2-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1 +// CHECK2-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1 +// CHECK2-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP12]] +// CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE2]], label [[OMP_ARRAYCPY_BODY]] +// CHECK2: omp.arraycpy.done2: +// CHECK2-NEXT: ret void +// +// +// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3sumPiiS__l78 +// CHECK2-SAME: (i32 noundef [[SIZE:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR5:[0-9]+]] { +// CHECK2-NEXT: entry: +// CHECK2-NEXT: [[SIZE_ADDR:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 +// CHECK2-NEXT: [[SIZE_CASTED:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: store i32 [[SIZE]], i32* [[SIZE_ADDR]], align 4 +// CHECK2-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 +// CHECK2-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 +// CHECK2-NEXT: [[TMP1:%.*]] = load i32, i32* [[SIZE_ADDR]], align 4 +// CHECK2-NEXT: store i32 [[TMP1]], i32* [[SIZE_CASTED]], align 4 +// CHECK2-NEXT: [[TMP2:%.*]] = load i32, i32* [[SIZE_CASTED]], align 4 +// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB4]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, [10 x i32]*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), i32 [[TMP2]], [10 x i32]* [[TMP0]]) +// CHECK2-NEXT: ret void +// +// +// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..9 +// CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[SIZE:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR1]] { +// CHECK2-NEXT: entry: +// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 +// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 +// CHECK2-NEXT: [[SIZE_ADDR:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 +// CHECK2-NEXT: [[A2:%.*]] = alloca [2 x i32], align 4 +// CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 4 +// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 +// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 +// CHECK2-NEXT: store i32 [[SIZE]], i32* [[SIZE_ADDR]], align 4 +// CHECK2-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 +// CHECK2-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 +// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 0 +// CHECK2-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 1 +// CHECK2-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[A2]], i32 0, i32 0 +// CHECK2-NEXT: [[TMP1:%.*]] = getelementptr i32, i32* [[ARRAY_BEGIN]], i32 2 +// CHECK2-NEXT: [[OMP_ARRAYINIT_ISEMPTY:%.*]] = icmp eq i32* [[ARRAY_BEGIN]], [[TMP1]] +// CHECK2-NEXT: br i1 [[OMP_ARRAYINIT_ISEMPTY]], label [[OMP_ARRAYINIT_DONE:%.*]], label [[OMP_ARRAYINIT_BODY:%.*]] +// CHECK2: omp.arrayinit.body: +// CHECK2-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i32* [ [[ARRAY_BEGIN]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYINIT_BODY]] ] +// CHECK2-NEXT: store i32 0, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4 +// CHECK2-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1 +// CHECK2-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP1]] +// CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYINIT_DONE]], label [[OMP_ARRAYINIT_BODY]] +// CHECK2: omp.arrayinit.done: +// CHECK2-NEXT: [[TMP2:%.*]] = bitcast [10 x i32]* [[TMP0]] to i32* +// CHECK2-NEXT: [[TMP3:%.*]] = ptrtoint i32* [[TMP2]] to i64 +// CHECK2-NEXT: [[TMP4:%.*]] = ptrtoint i32* [[ARRAYIDX]] to i64 +// CHECK2-NEXT: [[TMP5:%.*]] = sub i64 [[TMP3]], [[TMP4]] +// CHECK2-NEXT: [[TMP6:%.*]] = sdiv exact i64 [[TMP5]], ptrtoint (i32* getelementptr (i32, i32* null, i32 1) to i64) +// CHECK2-NEXT: [[TMP7:%.*]] = bitcast [2 x i32]* [[A2]] to i32* +// CHECK2-NEXT: [[TMP8:%.*]] = getelementptr i32, i32* [[TMP7]], i64 [[TMP6]] +// CHECK2-NEXT: [[TMP9:%.*]] = bitcast i32* [[TMP8]] to [10 x i32]* +// CHECK2-NEXT: [[RHS_BEGIN:%.*]] = bitcast [2 x i32]* [[A2]] to i32* +// CHECK2-NEXT: store i32 0, i32* [[I]], align 4 +// CHECK2-NEXT: br label [[FOR_COND:%.*]] +// CHECK2: for.cond: +// CHECK2-NEXT: [[TMP10:%.*]] = load i32, i32* [[I]], align 4 +// CHECK2-NEXT: [[TMP11:%.*]] = load i32, i32* [[SIZE_ADDR]], align 4 +// CHECK2-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP10]], 
[[TMP11]] +// CHECK2-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]] +// CHECK2: for.body: +// CHECK2-NEXT: br label [[FOR_INC:%.*]] +// CHECK2: for.inc: +// CHECK2-NEXT: [[TMP12:%.*]] = load i32, i32* [[I]], align 4 +// CHECK2-NEXT: [[INC:%.*]] = add nsw i32 [[TMP12]], 1 +// CHECK2-NEXT: store i32 [[INC]], i32* [[I]], align 4 +// CHECK2-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP10:![0-9]+]] +// CHECK2: for.end: +// CHECK2-NEXT: [[TMP13:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0 +// CHECK2-NEXT: [[TMP14:%.*]] = bitcast i32* [[RHS_BEGIN]] to i8* +// CHECK2-NEXT: store i8* [[TMP14]], i8** [[TMP13]], align 4 +// CHECK2-NEXT: [[TMP15:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 +// CHECK2-NEXT: [[TMP16:%.*]] = load i32, i32* [[TMP15]], align 4 +// CHECK2-NEXT: [[TMP17:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8* +// CHECK2-NEXT: [[TMP18:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB3]], i32 [[TMP16]], i32 1, i32 4, i8* [[TMP17]], void (i8*, i8*)* @.omp.reduction.reduction_func.10, [8 x i32]* @.gomp_critical_user_.reduction.var) +// CHECK2-NEXT: switch i32 [[TMP18]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ +// CHECK2-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]] +// CHECK2-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] +// CHECK2-NEXT: ] +// CHECK2: .omp.reduction.case1: +// CHECK2-NEXT: [[TMP19:%.*]] = getelementptr i32, i32* [[ARRAYIDX]], i32 2 +// CHECK2-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i32* [[ARRAYIDX]], [[TMP19]] +// CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE6:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]] +// CHECK2: omp.arraycpy.body: +// CHECK2-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i32* [ [[RHS_BEGIN]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] +// CHECK2-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST3:%.*]] = phi i32* [ [[ARRAYIDX]], [[DOTOMP_REDUCTION_CASE1]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT4:%.*]], [[OMP_ARRAYCPY_BODY]] ] +// CHECK2-NEXT: [[TMP20:%.*]] = load i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], align 4 +// CHECK2-NEXT: [[TMP21:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 4 +// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP20]], [[TMP21]] +// CHECK2-NEXT: store i32 [[ADD]], i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], align 4 +// CHECK2-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT4]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST3]], i32 1 +// CHECK2-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1 +// CHECK2-NEXT: [[OMP_ARRAYCPY_DONE5:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT4]], [[TMP19]] +// CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_DONE5]], label [[OMP_ARRAYCPY_DONE6]], label [[OMP_ARRAYCPY_BODY]] +// CHECK2: omp.arraycpy.done6: +// CHECK2-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB3]], i32 [[TMP16]], [8 x i32]* @.gomp_critical_user_.reduction.var) +// CHECK2-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] +// CHECK2: .omp.reduction.case2: +// CHECK2-NEXT: [[TMP22:%.*]] = getelementptr i32, i32* [[ARRAYIDX]], i32 2 +// CHECK2-NEXT: [[OMP_ARRAYCPY_ISEMPTY7:%.*]] = icmp eq i32* [[ARRAYIDX]], [[TMP22]] +// CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY7]], label [[OMP_ARRAYCPY_DONE14:%.*]], label [[OMP_ARRAYCPY_BODY8:%.*]] +// CHECK2: omp.arraycpy.body8: +// CHECK2-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST9:%.*]] = phi i32* [ [[RHS_BEGIN]], [[DOTOMP_REDUCTION_CASE2]] ], [ 
[[OMP_ARRAYCPY_SRC_ELEMENT12:%.*]], [[OMP_ARRAYCPY_BODY8]] ] +// CHECK2-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST10:%.*]] = phi i32* [ [[ARRAYIDX]], [[DOTOMP_REDUCTION_CASE2]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT11:%.*]], [[OMP_ARRAYCPY_BODY8]] ] +// CHECK2-NEXT: [[TMP23:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST9]], align 4 +// CHECK2-NEXT: [[TMP24:%.*]] = atomicrmw add i32* [[OMP_ARRAYCPY_DESTELEMENTPAST10]], i32 [[TMP23]] monotonic, align 4 +// CHECK2-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT11]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST10]], i32 1 +// CHECK2-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT12]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST9]], i32 1 +// CHECK2-NEXT: [[OMP_ARRAYCPY_DONE13:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT11]], [[TMP22]] +// CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_DONE13]], label [[OMP_ARRAYCPY_DONE14]], label [[OMP_ARRAYCPY_BODY8]] +// CHECK2: omp.arraycpy.done14: +// CHECK2-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] +// CHECK2: .omp.reduction.default: +// CHECK2-NEXT: ret void +// +// +// CHECK2-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.10 +// CHECK2-SAME: (i8* noundef [[TMP0:%.*]], i8* noundef [[TMP1:%.*]]) #[[ATTR3]] { +// CHECK2-NEXT: entry: +// CHECK2-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 4 +// CHECK2-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 4 +// CHECK2-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 4 +// CHECK2-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 4 +// CHECK2-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 4 +// CHECK2-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]* +// CHECK2-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 4 +// CHECK2-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]* +// CHECK2-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i32 0, i32 0 +// CHECK2-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 4 +// CHECK2-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32* +// CHECK2-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i32 0, i32 0 +// CHECK2-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 4 +// CHECK2-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32* +// CHECK2-NEXT: [[TMP12:%.*]] = getelementptr i32, i32* [[TMP11]], i32 2 +// CHECK2-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq i32* [[TMP11]], [[TMP12]] +// CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE2:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]] +// CHECK2: omp.arraycpy.body: +// CHECK2-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi i32* [ [[TMP8]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] +// CHECK2-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi i32* [ [[TMP11]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] +// CHECK2-NEXT: [[TMP13:%.*]] = load i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4 +// CHECK2-NEXT: [[TMP14:%.*]] = load i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], align 4 +// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP13]], [[TMP14]] +// CHECK2-NEXT: store i32 [[ADD]], i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], align 4 +// CHECK2-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1 +// CHECK2-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr i32, i32* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1 +// CHECK2-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq i32* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP12]] +// CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE2]], label [[OMP_ARRAYCPY_BODY]] +// 
CHECK2: omp.arraycpy.done2: +// CHECK2-NEXT: ret void +// +// +// CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3sumPiiS__l81 +// CHECK2-SAME: (i32 noundef [[SIZE:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR5]] { +// CHECK2-NEXT: entry: +// CHECK2-NEXT: [[SIZE_ADDR:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 +// CHECK2-NEXT: [[SIZE_CASTED:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: store i32 [[SIZE]], i32* [[SIZE_ADDR]], align 4 +// CHECK2-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 +// CHECK2-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 +// CHECK2-NEXT: [[TMP1:%.*]] = load i32, i32* [[SIZE_ADDR]], align 4 +// CHECK2-NEXT: store i32 [[TMP1]], i32* [[SIZE_CASTED]], align 4 +// CHECK2-NEXT: [[TMP2:%.*]] = load i32, i32* [[SIZE_CASTED]], align 4 +// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB4]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, [10 x i32]*)* @.omp_outlined..13 to void (i32*, i32*, ...)*), i32 [[TMP2]], [10 x i32]* [[TMP0]]) +// CHECK2-NEXT: ret void +// +// +// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..13 +// CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[SIZE:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR1]] { +// CHECK2-NEXT: entry: +// CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 +// CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 4 +// CHECK2-NEXT: [[SIZE_ADDR:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 +// CHECK2-NEXT: [[A1:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[I:%.*]] = alloca i32, align 4 +// CHECK2-NEXT: [[DOTOMP_REDUCTION_RED_LIST:%.*]] = alloca [1 x i8*], align 4 +// CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 +// CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 +// CHECK2-NEXT: store i32 [[SIZE]], i32* [[SIZE_ADDR]], align 4 +// CHECK2-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 +// CHECK2-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 +// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [10 x i32], [10 x i32]* [[TMP0]], i32 0, i32 3 +// CHECK2-NEXT: store i32 0, i32* [[A1]], align 4 +// CHECK2-NEXT: [[TMP1:%.*]] = bitcast [10 x i32]* [[TMP0]] to i32* +// CHECK2-NEXT: [[TMP2:%.*]] = ptrtoint i32* [[TMP1]] to i64 +// CHECK2-NEXT: [[TMP3:%.*]] = ptrtoint i32* [[ARRAYIDX]] to i64 +// CHECK2-NEXT: [[TMP4:%.*]] = sub i64 [[TMP2]], [[TMP3]] +// CHECK2-NEXT: [[TMP5:%.*]] = sdiv exact i64 [[TMP4]], ptrtoint (i32* getelementptr (i32, i32* null, i32 1) to i64) +// CHECK2-NEXT: [[TMP6:%.*]] = getelementptr i32, i32* [[A1]], i64 [[TMP5]] +// CHECK2-NEXT: [[TMP7:%.*]] = bitcast i32* [[TMP6]] to [10 x i32]* +// CHECK2-NEXT: store i32 0, i32* [[I]], align 4 +// CHECK2-NEXT: br label [[FOR_COND:%.*]] +// CHECK2: for.cond: +// CHECK2-NEXT: [[TMP8:%.*]] = load i32, i32* [[I]], align 4 +// CHECK2-NEXT: [[TMP9:%.*]] = load i32, i32* [[SIZE_ADDR]], align 4 +// CHECK2-NEXT: [[CMP:%.*]] = icmp slt i32 [[TMP8]], [[TMP9]] +// CHECK2-NEXT: br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]] +// CHECK2: for.body: +// CHECK2-NEXT: br label [[FOR_INC:%.*]] +// CHECK2: for.inc: +// CHECK2-NEXT: [[TMP10:%.*]] = load i32, i32* 
[[I]], align 4 +// CHECK2-NEXT: [[INC:%.*]] = add nsw i32 [[TMP10]], 1 +// CHECK2-NEXT: store i32 [[INC]], i32* [[I]], align 4 +// CHECK2-NEXT: br label [[FOR_COND]], !llvm.loop [[LOOP12:![0-9]+]] +// CHECK2: for.end: +// CHECK2-NEXT: [[TMP11:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]], i32 0, i32 0 +// CHECK2-NEXT: [[TMP12:%.*]] = bitcast i32* [[A1]] to i8* +// CHECK2-NEXT: store i8* [[TMP12]], i8** [[TMP11]], align 4 +// CHECK2-NEXT: [[TMP13:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 4 +// CHECK2-NEXT: [[TMP14:%.*]] = load i32, i32* [[TMP13]], align 4 +// CHECK2-NEXT: [[TMP15:%.*]] = bitcast [1 x i8*]* [[DOTOMP_REDUCTION_RED_LIST]] to i8* +// CHECK2-NEXT: [[TMP16:%.*]] = call i32 @__kmpc_reduce_nowait(%struct.ident_t* @[[GLOB3]], i32 [[TMP14]], i32 1, i32 4, i8* [[TMP15]], void (i8*, i8*)* @.omp.reduction.reduction_func.14, [8 x i32]* @.gomp_critical_user_.reduction.var) +// CHECK2-NEXT: switch i32 [[TMP16]], label [[DOTOMP_REDUCTION_DEFAULT:%.*]] [ +// CHECK2-NEXT: i32 1, label [[DOTOMP_REDUCTION_CASE1:%.*]] +// CHECK2-NEXT: i32 2, label [[DOTOMP_REDUCTION_CASE2:%.*]] +// CHECK2-NEXT: ] +// CHECK2: .omp.reduction.case1: +// CHECK2-NEXT: [[TMP17:%.*]] = load i32, i32* [[ARRAYIDX]], align 4 +// CHECK2-NEXT: [[TMP18:%.*]] = load i32, i32* [[A1]], align 4 +// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP17]], [[TMP18]] +// CHECK2-NEXT: store i32 [[ADD]], i32* [[ARRAYIDX]], align 4 +// CHECK2-NEXT: call void @__kmpc_end_reduce_nowait(%struct.ident_t* @[[GLOB3]], i32 [[TMP14]], [8 x i32]* @.gomp_critical_user_.reduction.var) +// CHECK2-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] +// CHECK2: .omp.reduction.case2: +// CHECK2-NEXT: [[TMP19:%.*]] = load i32, i32* [[A1]], align 4 +// CHECK2-NEXT: [[TMP20:%.*]] = atomicrmw add i32* [[ARRAYIDX]], i32 [[TMP19]] monotonic, align 4 +// CHECK2-NEXT: br label [[DOTOMP_REDUCTION_DEFAULT]] +// CHECK2: .omp.reduction.default: +// CHECK2-NEXT: ret void +// +// +// CHECK2-LABEL: define {{[^@]+}}@.omp.reduction.reduction_func.14 +// CHECK2-SAME: (i8* noundef [[TMP0:%.*]], i8* noundef [[TMP1:%.*]]) #[[ATTR3]] { +// CHECK2-NEXT: entry: +// CHECK2-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 4 +// CHECK2-NEXT: [[DOTADDR1:%.*]] = alloca i8*, align 4 +// CHECK2-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 4 +// CHECK2-NEXT: store i8* [[TMP1]], i8** [[DOTADDR1]], align 4 +// CHECK2-NEXT: [[TMP2:%.*]] = load i8*, i8** [[DOTADDR]], align 4 +// CHECK2-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP2]] to [1 x i8*]* +// CHECK2-NEXT: [[TMP4:%.*]] = load i8*, i8** [[DOTADDR1]], align 4 +// CHECK2-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to [1 x i8*]* +// CHECK2-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP5]], i32 0, i32 0 +// CHECK2-NEXT: [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 4 +// CHECK2-NEXT: [[TMP8:%.*]] = bitcast i8* [[TMP7]] to i32* +// CHECK2-NEXT: [[TMP9:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[TMP3]], i32 0, i32 0 +// CHECK2-NEXT: [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 4 +// CHECK2-NEXT: [[TMP11:%.*]] = bitcast i8* [[TMP10]] to i32* +// CHECK2-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP11]], align 4 +// CHECK2-NEXT: [[TMP13:%.*]] = load i32, i32* [[TMP8]], align 4 +// CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP12]], [[TMP13]] +// CHECK2-NEXT: store i32 [[ADD]], i32* [[TMP11]], align 4 +// CHECK2-NEXT: ret void +// +// +// CHECK2-LABEL: define {{[^@]+}}@main +// CHECK2-SAME: () #[[ATTR6:[0-9]+]] { +// CHECK2-NEXT: entry: +// CHECK2-NEXT: [[RETVAL:%.*]] = alloca 
i32, align 4
+// CHECK2-NEXT:    [[SIZE:%.*]] = alloca i32, align 4
+// CHECK2-NEXT:    [[ARRAY:%.*]] = alloca i32*, align 4
+// CHECK2-NEXT:    [[RESULT:%.*]] = alloca i32, align 4
+// CHECK2-NEXT:    store i32 0, i32* [[RETVAL]], align 4
+// CHECK2-NEXT:    store i32 100, i32* [[SIZE]], align 4
+// CHECK2-NEXT:    [[CALL:%.*]] = call noalias noundef nonnull i8* @_Znaj(i32 noundef 400) #[[ATTR9:[0-9]+]]
+// CHECK2-NEXT:    [[TMP0:%.*]] = bitcast i8* [[CALL]] to i32*
+// CHECK2-NEXT:    store i32* [[TMP0]], i32** [[ARRAY]], align 4
+// CHECK2-NEXT:    store i32 0, i32* [[RESULT]], align 4
+// CHECK2-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[ARRAY]], align 4
+// CHECK2-NEXT:    call void @_Z3sumPiiS_(i32* noundef [[TMP1]], i32 noundef 100, i32* noundef [[RESULT]])
+// CHECK2-NEXT:    ret i32 0
+//
+//
+// CHECK2-LABEL: define {{[^@]+}}@.omp_offloading.requires_reg
+// CHECK2-SAME: () #[[ATTR8:[0-9]+]] {
+// CHECK2-NEXT:  entry:
+// CHECK2-NEXT:    call void @__tgt_register_requires(i64 1)
+// CHECK2-NEXT:    ret void
+//
diff --git a/clang/test/OpenMP/target_codegen.cpp b/clang/test/OpenMP/target_codegen.cpp
--- a/clang/test/OpenMP/target_codegen.cpp
+++ b/clang/test/OpenMP/target_codegen.cpp
@@ -76,11 +76,13 @@
// CHECK-DAG: [[MAPT2:@.+]] = private unnamed_addr constant [1 x i64] [i64 800]
// CHECK-DAG: [[SIZET3:@.+]] = private unnamed_addr constant [2 x i64] [i64 4, i64 2]
// CHECK-DAG: [[MAPT3:@.+]] = private unnamed_addr constant [2 x i64] [i64 800, i64 800]
+// CHECK-DAG: [[SIZET4:@.+]] = private unnamed_addr global [9 x i64] [i64 4, i64 40, i64 {{4|8}}, i64 0, i64 400, i64 {{4|8}}, i64 {{4|8}}, i64 0, i64 {{12|16}}]
// CHECK-DAG: [[MAPT4:@.+]] = private unnamed_addr constant [9 x i64] [i64 800, i64 547, i64 800, i64 547, i64 547, i64 800, i64 800, i64 547, i64 547]
// CHECK-DAG: [[SIZET5:@.+]] = private unnamed_addr constant [3 x i64] [i64 4, i64 2, i64 40]
// CHECK-DAG: [[MAPT5:@.+]] = private unnamed_addr constant [3 x i64] [i64 800, i64 800, i64 547]
// CHECK-DAG: [[SIZET6:@.+]] = private unnamed_addr constant [4 x i64] [i64 4, i64 2, i64 1, i64 40]
// CHECK-DAG: [[MAPT6:@.+]] = private unnamed_addr constant [4 x i64] [i64 800, i64 800, i64 800, i64 547]
+// CHECK-DAG: [[SIZET7:@.+]] = private unnamed_addr global [5 x i64] [i64 8, i64 4, i64 {{4|8}}, i64 {{4|8}}, i64 0]
// CHECK-DAG: [[MAPT7:@.+]] = private unnamed_addr constant [5 x i64] [i64 547, i64 800, i64 800, i64 800, i64 547]
// CHECK-DAG: [[SIZET9:@.+]] = private unnamed_addr constant [1 x i64] [i64 12]
// CHECK-DAG: [[MAPT10:@.+]] = private unnamed_addr constant [1 x i64] [i64 35]
@@ -272,37 +274,27 @@
// CHECK-32: [[CNSZSIZE:%.+]] = mul nuw i32 [[CNELEMSIZE2]], 8
// CHECK-32: [[CNSIZE:%.+]] = sext i32 [[CNSZSIZE]] to i64
- // CHECK-DAG: [[RET:%.+]] = call i32 @__tgt_target_mapper(%struct.ident_t* @{{.+}}, i64 -1, i8* @{{[^,]+}}, i32 9, i8** [[BPR:%[^,]+]], i8** [[PR:%[^,]+]], i64* [[SR:%[^,]+]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* [[MAPT4]], i32 0, i32 0), i8** null, i8** null)
+ // CHECK-DAG: [[RET:%.+]] = call i32 @__tgt_target_mapper(%struct.ident_t* @{{.+}}, i64 -1, i8* @{{[^,]+}}, i32 9, i8** [[BPR:%[^,]+]], i8** [[PR:%[^,]+]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* [[SIZET4]], i32 0, i32 0), i64* getelementptr inbounds ([9 x i64], [9 x i64]* [[MAPT4]], i32 0, i32 0), i8** null, i8** null)
// CHECK-DAG: [[BPR]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[BP:%[^,]+]], i32 0, i32 0
// CHECK-DAG: [[PR]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[P:%[^,]+]], i32 0, i32 0
- // CHECK-DAG: [[SR]] = getelementptr inbounds [9 x
i64], [9 x i64]* [[S:%[^,]+]], i32 0, i32 0 - // CHECK-DAG: [[SADDR0:%.+]] = getelementptr inbounds [9 x i64], [9 x i64]* [[S]], i32 0, i32 [[IDX0:0]] - // CHECK-DAG: [[BPADDR0:%.+]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[BP]], i32 0, i32 [[IDX0]] + // CHECK-DAG: [[BPADDR0:%.+]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[BP]], i32 0, i32 [[IDX0:0]] // CHECK-DAG: [[PADDR0:%.+]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[P]], i32 0, i32 [[IDX0]] - // CHECK-DAG: [[SADDR1:%.+]] = getelementptr inbounds [9 x i64], [9 x i64]* [[S]], i32 0, i32 [[IDX1:1]] - // CHECK-DAG: [[BPADDR1:%.+]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[BP]], i32 0, i32 [[IDX1]] + // CHECK-DAG: [[BPADDR1:%.+]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[BP]], i32 0, i32 [[IDX1:1]] // CHECK-DAG: [[PADDR1:%.+]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[P]], i32 0, i32 [[IDX1]] - // CHECK-DAG: [[SADDR2:%.+]] = getelementptr inbounds [9 x i64], [9 x i64]* [[S]], i32 0, i32 [[IDX2:2]] - // CHECK-DAG: [[BPADDR2:%.+]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[BP]], i32 0, i32 [[IDX2]] + // CHECK-DAG: [[BPADDR2:%.+]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[BP]], i32 0, i32 [[IDX2:2]] // CHECK-DAG: [[PADDR2:%.+]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[P]], i32 0, i32 [[IDX2]] - // CHECK-DAG: [[SADDR3:%.+]] = getelementptr inbounds [9 x i64], [9 x i64]* [[S]], i32 0, i32 [[IDX3:3]] - // CHECK-DAG: [[BPADDR3:%.+]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[BP]], i32 0, i32 [[IDX3]] + // CHECK-DAG: [[BPADDR3:%.+]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[BP]], i32 0, i32 [[IDX3:3]] // CHECK-DAG: [[PADDR3:%.+]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[P]], i32 0, i32 [[IDX3]] - // CHECK-DAG: [[SADDR4:%.+]] = getelementptr inbounds [9 x i64], [9 x i64]* [[S]], i32 0, i32 [[IDX4:4]] - // CHECK-DAG: [[BPADDR4:%.+]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[BP]], i32 0, i32 [[IDX4]] + // CHECK-DAG: [[BPADDR4:%.+]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[BP]], i32 0, i32 [[IDX4:4]] // CHECK-DAG: [[PADDR4:%.+]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[P]], i32 0, i32 [[IDX4]] - // CHECK-DAG: [[SADDR5:%.+]] = getelementptr inbounds [9 x i64], [9 x i64]* [[S]], i32 0, i32 [[IDX5:5]] - // CHECK-DAG: [[BPADDR5:%.+]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[BP]], i32 0, i32 [[IDX5]] + // CHECK-DAG: [[BPADDR5:%.+]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[BP]], i32 0, i32 [[IDX5:5]] // CHECK-DAG: [[PADDR5:%.+]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[P]], i32 0, i32 [[IDX5]] - // CHECK-DAG: [[SADDR6:%.+]] = getelementptr inbounds [9 x i64], [9 x i64]* [[S]], i32 0, i32 [[IDX6:6]] - // CHECK-DAG: [[BPADDR6:%.+]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[BP]], i32 0, i32 [[IDX6]] + // CHECK-DAG: [[BPADDR6:%.+]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[BP]], i32 0, i32 [[IDX6:6]] // CHECK-DAG: [[PADDR6:%.+]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[P]], i32 0, i32 [[IDX6]] - // CHECK-DAG: [[SADDR7:%.+]] = getelementptr inbounds [9 x i64], [9 x i64]* [[S]], i32 0, i32 [[IDX7:7]] - // CHECK-DAG: [[BPADDR7:%.+]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[BP]], i32 0, i32 [[IDX7]] + // CHECK-DAG: [[BPADDR7:%.+]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[BP]], i32 0, i32 [[IDX7:7]] // CHECK-DAG: [[PADDR7:%.+]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[P]], i32 0, i32 [[IDX7]] - // CHECK-DAG: [[SADDR8:%.+]] = getelementptr inbounds [9 x i64], [9 x i64]* [[S]], i32 0, i32 
[[IDX8:8]] - // CHECK-DAG: [[BPADDR8:%.+]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[BP]], i32 0, i32 [[IDX8]] + // CHECK-DAG: [[BPADDR8:%.+]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[BP]], i32 0, i32 [[IDX8:8]] // CHECK-DAG: [[PADDR8:%.+]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[P]], i32 0, i32 [[IDX8]] // The names below are not necessarily consistent with the names used for the @@ -311,55 +303,48 @@ // CHECK-DAG: [[CPADDR2:%.+]] = bitcast i8** [[PADDR2]] to i[[SZ]]* // CHECK-DAG: store i[[SZ]] [[VLA0]], i[[SZ]]* [[CBPADDR2]] // CHECK-DAG: store i[[SZ]] [[VLA0]], i[[SZ]]* [[CPADDR2]] - // CHECK-DAG: store i64 {{4|8}}, i64* [[SADDR2]] // CHECK-DAG: [[CBPADDR6:%.+]] = bitcast i8** [[BPADDR6]] to i[[SZ]]* // CHECK-DAG: [[CPADDR6:%.+]] = bitcast i8** [[PADDR6]] to i[[SZ]]* // CHECK-DAG: store i[[SZ]] [[VLA1]], i[[SZ]]* [[CBPADDR6]] // CHECK-DAG: store i[[SZ]] [[VLA1]], i[[SZ]]* [[CPADDR6]] - // CHECK-DAG: store i64 {{4|8}}, i64* [[SADDR6]] // CHECK-DAG: [[CBPADDR5:%.+]] = bitcast i8** [[BPADDR5]] to i[[SZ]]* // CHECK-DAG: [[CPADDR5:%.+]] = bitcast i8** [[PADDR5]] to i[[SZ]]* // CHECK-DAG: store i[[SZ]] 5, i[[SZ]]* [[CBPADDR5]] // CHECK-DAG: store i[[SZ]] 5, i[[SZ]]* [[CPADDR5]] - // CHECK-DAG: store i64 {{4|8}}, i64* [[SADDR5]] // CHECK-DAG: [[CBPADDR0:%.+]] = bitcast i8** [[BPADDR0]] to i[[SZ]]* // CHECK-DAG: [[CPADDR0:%.+]] = bitcast i8** [[PADDR0]] to i[[SZ]]* // CHECK-DAG: store i[[SZ]] [[A_CVAL]], i[[SZ]]* [[CBPADDR0]] // CHECK-DAG: store i[[SZ]] [[A_CVAL]], i[[SZ]]* [[CPADDR0]] - // CHECK-DAG: store i64 4, i64* [[SADDR0]] // CHECK-DAG: [[CBPADDR1:%.+]] = bitcast i8** [[BPADDR1]] to [10 x float]** // CHECK-DAG: [[CPADDR1:%.+]] = bitcast i8** [[PADDR1]] to [10 x float]** // CHECK-DAG: store [10 x float]* %{{.+}}, [10 x float]** [[CBPADDR1]] // CHECK-DAG: store [10 x float]* %{{.+}}, [10 x float]** [[CPADDR1]] - // CHECK-DAG: store i64 40, i64* [[SADDR1]] // CHECK-DAG: [[CBPADDR3:%.+]] = bitcast i8** [[BPADDR3]] to float** // CHECK-DAG: [[CPADDR3:%.+]] = bitcast i8** [[PADDR3]] to float** // CHECK-DAG: store float* %{{.+}}, float** [[CBPADDR3]] // CHECK-DAG: store float* %{{.+}}, float** [[CPADDR3]] - // CHECK-DAG: store i64 [[BNSIZE]], i64* [[SADDR3]] + // CHECK-DAG: store i64 [[BNSIZE]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* [[SIZET4]], i32 0, i32 [[IDX3]]) // CHECK-DAG: [[CBPADDR4:%.+]] = bitcast i8** [[BPADDR4]] to [5 x [10 x double]]** // CHECK-DAG: [[CPADDR4:%.+]] = bitcast i8** [[PADDR4]] to [5 x [10 x double]]** // CHECK-DAG: store [5 x [10 x double]]* %{{.+}}, [5 x [10 x double]]** [[CBPADDR4]] // CHECK-DAG: store [5 x [10 x double]]* %{{.+}}, [5 x [10 x double]]** [[CPADDR4]] - // CHECK-DAG: store i64 400, i64* [[SADDR4]] // CHECK-DAG: [[CBPADDR7:%.+]] = bitcast i8** [[BPADDR7]] to double** // CHECK-DAG: [[CPADDR7:%.+]] = bitcast i8** [[PADDR7]] to double** // CHECK-DAG: store double* %{{.+}}, double** [[CBPADDR7]] // CHECK-DAG: store double* %{{.+}}, double** [[CPADDR7]] - // CHECK-DAG: store i64 [[CNSIZE]], i64* [[SADDR7]] + // CHECK-DAG: store i64 [[CNSIZE]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* [[SIZET4]], i32 0, i32 [[IDX7]]) // CHECK-DAG: [[CBPADDR8:%.+]] = bitcast i8** [[BPADDR8]] to [[TT]]** // CHECK-DAG: [[CPADDR8:%.+]] = bitcast i8** [[PADDR8]] to [[TT]]** // CHECK-DAG: store [[TT]]* %{{.+}}, [[TT]]** [[CBPADDR8]] // CHECK-DAG: store [[TT]]* %{{.+}}, [[TT]]** [[CPADDR8]] - // CHECK-DAG: store i64 {{12|16}}, i64* [[SADDR8]] // CHECK: [[ERROR:%.+]] = icmp ne i32 [[RET]], 0 // CHECK-NEXT: br i1 [[ERROR]], label 
%[[FAIL:.+]], label %[[END:[^,]+]] @@ -580,24 +565,18 @@ // CHECK-32: [[CSZSIZE:%.+]] = mul nuw i32 [[CELEMSIZE2]], 2 // CHECK-32: [[CSIZE:%.+]] = sext i32 [[CSZSIZE]] to i64 -// CHECK-DAG: [[RET:%.+]] = call i32 @__tgt_target_mapper(%struct.ident_t* @{{.+}}, i64 -1, i8* @{{[^,]+}}, i32 5, i8** [[BPR:%[^,]+]], i8** [[PR:%[^,]+]], i64* [[SR:%[^,]+]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* [[MAPT7]], i32 0, i32 0), i8** null, i8** null) +// CHECK-DAG: [[RET:%.+]] = call i32 @__tgt_target_mapper(%struct.ident_t* @{{.+}}, i64 -1, i8* @{{[^,]+}}, i32 5, i8** [[BPR:%[^,]+]], i8** [[PR:%[^,]+]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* [[SIZET7]], i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* [[MAPT7]], i32 0, i32 0), i8** null, i8** null) // CHECK-DAG: [[BPR]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[BP:%.+]], i32 0, i32 0 // CHECK-DAG: [[PR]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[P:%.+]], i32 0, i32 0 -// CHECK-DAG: [[SR]] = getelementptr inbounds [5 x i64], [5 x i64]* [[S:%.+]], i32 0, i32 0 -// CHECK-DAG: [[SADDR0:%.+]] = getelementptr inbounds [5 x i64], [5 x i64]* [[S]], i32 0, i32 [[IDX0:0]] -// CHECK-DAG: [[BPADDR0:%.+]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[BP]], i32 0, i32 [[IDX0]] +// CHECK-DAG: [[BPADDR0:%.+]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[BP]], i32 0, i32 [[IDX0:0]] // CHECK-DAG: [[PADDR0:%.+]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[P]], i32 0, i32 [[IDX0]] -// CHECK-DAG: [[SADDR1:%.+]] = getelementptr inbounds [5 x i64], [5 x i64]* [[S]], i32 0, i32 [[IDX1:1]] -// CHECK-DAG: [[BPADDR1:%.+]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[BP]], i32 0, i32 [[IDX1]] +// CHECK-DAG: [[BPADDR1:%.+]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[BP]], i32 0, i32 [[IDX1:1]] // CHECK-DAG: [[PADDR1:%.+]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[P]], i32 0, i32 [[IDX1]] -// CHECK-DAG: [[SADDR2:%.+]] = getelementptr inbounds [5 x i64], [5 x i64]* [[S]], i32 0, i32 [[IDX2:2]] -// CHECK-DAG: [[BPADDR2:%.+]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[BP]], i32 0, i32 [[IDX2]] +// CHECK-DAG: [[BPADDR2:%.+]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[BP]], i32 0, i32 [[IDX2:2]] // CHECK-DAG: [[PADDR2:%.+]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[P]], i32 0, i32 [[IDX2]] -// CHECK-DAG: [[SADDR3:%.+]] = getelementptr inbounds [5 x i64], [5 x i64]* [[S]], i32 0, i32 [[IDX3:3]] -// CHECK-DAG: [[BPADDR3:%.+]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[BP]], i32 0, i32 [[IDX3]] +// CHECK-DAG: [[BPADDR3:%.+]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[BP]], i32 0, i32 [[IDX3:3]] // CHECK-DAG: [[PADDR3:%.+]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[P]], i32 0, i32 [[IDX3]] -// CHECK-DAG: [[SADDR4:%.+]] = getelementptr inbounds [5 x i64], [5 x i64]* [[S]], i32 0, i32 [[IDX4:4]] -// CHECK-DAG: [[BPADDR4:%.+]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[BP]], i32 0, i32 [[IDX4]] +// CHECK-DAG: [[BPADDR4:%.+]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[BP]], i32 0, i32 [[IDX4:4]] // CHECK-DAG: [[PADDR4:%.+]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[P]], i32 0, i32 [[IDX4]] // The names below are not necessarily consistent with the names used for the @@ -606,31 +585,27 @@ // CHECK-DAG: [[CPADDR4:%.+]] = bitcast i8** [[PADDR4]] to i16** // CHECK-DAG: store i16* %{{.+}}, i16** [[CBPADDR4]] // CHECK-DAG: store i16* %{{.+}}, i16** [[CPADDR4]] -// CHECK-DAG: store i64 [[CSIZE]], i64* [[SADDR4]] +// CHECK-DAG: store i64 [[CSIZE]], i64* 
getelementptr inbounds ([5 x i64], [5 x i64]* [[SIZET7]], i32 0, i32 [[IDX4]])
// CHECK-DAG: [[CBPADDR3:%.+]] = bitcast i8** [[BPADDR3]] to i[[SZ]]*
// CHECK-DAG: [[CPADDR3:%.+]] = bitcast i8** [[PADDR3]] to i[[SZ]]*
// CHECK-DAG: store i[[SZ]] [[VLA0]], i[[SZ]]* [[CBPADDR3]]
// CHECK-DAG: store i[[SZ]] [[VLA0]], i[[SZ]]* [[CPADDR3]]
-// CHECK-DAG: store i64 {{4|8}}, i64* [[SADDR3]]
// CHECK-DAG: [[CBPADDR2:%.+]] = bitcast i8** [[BPADDR2]] to i[[SZ]]*
// CHECK-DAG: [[CPADDR2:%.+]] = bitcast i8** [[PADDR2]] to i[[SZ]]*
// CHECK-DAG: store i[[SZ]] 2, i[[SZ]]* [[CBPADDR2]]
// CHECK-DAG: store i[[SZ]] 2, i[[SZ]]* [[CPADDR2]]
-// CHECK-DAG: store i64 {{4|8}}, i64* [[SADDR2]]
// CHECK-DAG: [[CBPADDR1:%.+]] = bitcast i8** [[BPADDR1]] to i[[SZ]]*
// CHECK-DAG: [[CPADDR1:%.+]] = bitcast i8** [[PADDR1]] to i[[SZ]]*
// CHECK-DAG: store i[[SZ]] [[B_CVAL]], i[[SZ]]* [[CBPADDR1]]
// CHECK-DAG: store i[[SZ]] [[B_CVAL]], i[[SZ]]* [[CPADDR1]]
-// CHECK-DAG: store i64 4, i64* [[SADDR1]]
// CHECK-DAG: [[CBPADDR0:%.+]] = bitcast i8** [[BPADDR0]] to [[S1]]**
// CHECK-DAG: [[CPADDR0:%.+]] = bitcast i8** [[PADDR0]] to double**
// CHECK-DAG: store [[S1]]* [[THIS:%.+]], [[S1]]** [[CBPADDR0]]
// CHECK-DAG: store double* [[A:%.+]], double** [[CPADDR0]]
-// CHECK-DAG: store i64 8, i64* [[SADDR0]]
// CHECK: [[ERROR:%.+]] = icmp ne i32 [[RET]], 0
// CHECK-NEXT: br i1 [[ERROR]], label %[[FAIL:.+]], label %[[END:[^,]+]]
diff --git a/clang/test/OpenMP/target_data_codegen.cpp b/clang/test/OpenMP/target_data_codegen.cpp
--- a/clang/test/OpenMP/target_data_codegen.cpp
+++ b/clang/test/OpenMP/target_data_codegen.cpp
@@ -37,7 +37,7 @@
// CK1: [[MTYPE03:@.+]] = {{.+}}constant [1 x i64] [i64 5]
-// CK1: [[SIZE04:@.+]] = {{.+}}constant [2 x i64] [i64 sdiv exact (i64 sub (i64 ptrtoint (double** getelementptr (double*, double** getelementptr inbounds (%struct.ST, %struct.ST* @gb, i32 0, i32 1), i32 1) to i64), i64 ptrtoint (double** getelementptr inbounds (%struct.ST, %struct.ST* @gb, i32 0, i32 1) to i64)), i64 ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)), i64 24]
+// CK1: [[SIZE04:@.+]] = {{.+}}global [2 x i64] [i64 0, i64 24]
// CK1: [[MTYPE04:@.+]] = {{.+}}constant [2 x i64] [i64 0, i64 281474976710673]
// CK1: [[MTYPE05:@.+]] = {{.+}}constant [1 x i64] [i64 1025]
@@ -150,6 +150,7 @@
// CK1-DAG: [[CP0:%.+]] = bitcast i8** [[P0]] to double***
// CK1-DAG: store [[ST]]* @gb, [[ST]]** [[CBP0]]
// CK1-DAG: store double** getelementptr inbounds ([[ST]], [[ST]]* @gb, i32 0, i32 1), double*** [[CP0]]
+ // CK1-DAG: store i64 sdiv exact (i64 sub (i64 ptrtoint (double** getelementptr (double*, double** getelementptr inbounds (%struct.ST, %struct.ST* @gb, i32 0, i32 1), i32 1) to i64), i64 ptrtoint (double** getelementptr inbounds (%struct.ST, %struct.ST* @gb, i32 0, i32 1) to i64)), i64 ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)), i64* getelementptr inbounds ([2 x i64], [2 x i64]* [[SIZE04]], i32 0, i32 0),
// CK1-DAG: [[BP1:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 1
// CK1-DAG: [[P1:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 1
@@ -357,6 +358,7 @@
}
};
+// CK2: [[SIZE00:@.+]] = {{.+}}global [2 x i64] [i64 0, i64 24]
// CK2: [[MTYPE00:@.+]] = {{.+}}constant [2 x i64] [i64 0, i64 281474976710677]
// CK2-LABEL: _Z3bari
@@ -368,31 +370,27 @@
// Region 00
// CK2: br i1 %{{[^,]+}}, label %[[IFTHEN:[^,]+]], label %[[IFELSE:[^,]+]]
// CK2: [[IFTHEN]]
-// CK2-DAG: call void @__tgt_target_data_begin_mapper(%struct.ident_t* @{{.+}}, i64 [[DEV:%[^,]+]], i32 2, i8** [[GEPBP:%.+]],
i8** [[GEPP:%.+]], i[[sz:64|32]]* [[GEPS:%.+]], {{.+}}getelementptr {{.+}}[2 x i{{.+}}]* [[MTYPE00]]{{.+}}, i8** null, i8** null) +// CK2-DAG: call void @__tgt_target_data_begin_mapper(%struct.ident_t* @{{.+}}, i64 [[DEV:%[^,]+]], i32 2, i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], {{.+}}getelementptr {{.+}}[2 x i{{.+}}]* [[SIZE00]]{{.+}}, {{.+}}getelementptr {{.+}}[2 x i{{.+}}]* [[MTYPE00]]{{.+}}, i8** null, i8** null) // CK2-DAG: [[DEV]] = sext i32 [[DEVi32:%[^,]+]] to i64 // CK2-DAG: [[DEVi32]] = load i32, i32* %{{[^,]+}}, // CK2-DAG: [[GEPBP]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[BP:%[^,]+]] // CK2-DAG: [[GEPP]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[P:%[^,]+]] -// CK2-DAG: [[GEPS]] = getelementptr inbounds [2 x i[[sz]]], [2 x i[[sz]]]* [[S:%[^,]+]] // CK2-DAG: [[BP0:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 0 // CK2-DAG: [[P0:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 0 -// CK2-DAG: [[S0:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 0 // CK2-DAG: [[CBP0:%.+]] = bitcast i8** [[BP0]] to [[ST]]** // CK2-DAG: [[CP0:%.+]] = bitcast i8** [[P0]] to double*** // CK2-DAG: store [[ST]]* [[VAR0:%.+]], [[ST]]** [[CBP0]] // CK2-DAG: store double** [[SEC0:%.+]], double*** [[CP0]] -// CK2-DAG: store i[[sz]] {{%.+}}, i[[sz]]* [[S0]] +// CK2-DAG: store i64 {{%.+}}, i64* getelementptr inbounds ([2 x i64], [2 x i64]* [[SIZE00]], i32 0, i32 0), // CK2-DAG: [[SEC0]] = getelementptr inbounds {{.*}}[[ST]]* [[VAR0]], i32 0, i32 1 // CK2-DAG: [[BP1:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 1 // CK2-DAG: [[P1:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 1 -// CK2-DAG: [[S1:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 1 // CK2-DAG: [[CBP1:%.+]] = bitcast i8** [[BP1]] to double*** // CK2-DAG: [[CP1:%.+]] = bitcast i8** [[P1]] to double** // CK2-DAG: store double** [[SEC0]], double*** [[CBP1]] // CK2-DAG: store double* [[SEC1:%.+]], double** [[CP1]] -// CK2-DAG: store i[[sz]] 24, i[[sz]]* [[S1]] // CK2-DAG: [[SEC1]] = getelementptr inbounds {{.*}}double* [[SEC11:%[^,]+]], i{{.+}} 1 // CK2-DAG: [[SEC11]] = load double*, double** [[SEC111:%[^,]+]], // CK2-DAG: [[SEC111]] = getelementptr inbounds {{.*}}[[ST]]* [[VAR0]], i32 0, i32 1 @@ -406,12 +404,11 @@ // CK2: br i1 %{{[^,]+}}, label %[[IFTHEN:[^,]+]], label %[[IFELSE:[^,]+]] // CK2: [[IFTHEN]] -// CK2-DAG: call void @__tgt_target_data_end_mapper(%struct.ident_t* @{{.+}}, i64 [[DEV:%[^,]+]], i32 2, i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], i[[sz]]* [[GEPS:%.+]], {{.+}}getelementptr {{.+}}[2 x i{{.+}}]* [[MTYPE00]]{{.+}}, i8** null, i8** null) +// CK2-DAG: call void @__tgt_target_data_end_mapper(%struct.ident_t* @{{.+}}, i64 [[DEV:%[^,]+]], i32 2, i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], {{.+}}getelementptr {{.+}}[2 x i{{.+}}]* [[SIZE00]]{{.+}}, {{.+}}getelementptr {{.+}}[2 x i{{.+}}]* [[MTYPE00]]{{.+}}, i8** null, i8** null) // CK2-DAG: [[DEV]] = sext i32 [[DEVi32:%[^,]+]] to i64 // CK2-DAG: [[DEVi32]] = load i32, i32* %{{[^,]+}}, // CK2-DAG: [[GEPBP]] = getelementptr inbounds {{.+}}[[BP]] // CK2-DAG: [[GEPP]] = getelementptr inbounds {{.+}}[[P]] -// CK2-DAG: [[GEPS]] = getelementptr inbounds {{.+}}[[S]] // CK2: br label %[[IFEND:[^,]+]] // CK2: [[IFELSE]] // CK2: br label %[[IFEND]] @@ -475,6 +472,7 @@ } }; +// CK4: [[SIZE00:@.+]] = {{.+}}global [2 x i64] [i64 0, i64 24] // CK4: [[MTYPE00:@.+]] = {{.+}}constant [2 x i64] [i64 0, i64 281474976711701] // CK4-LABEL: _Z3bari @@ -486,31 +484,27 @@ // Region 00 // CK4: br i1 %{{[^,]+}}, label 
%[[IFTHEN:[^,]+]], label %[[IFELSE:[^,]+]]
// CK4: [[IFTHEN]]
-// CK4-DAG: call void @__tgt_target_data_begin_mapper(%struct.ident_t* @{{.+}}, i64 [[DEV:%[^,]+]], i32 2, i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], i[[sz:64|32]]* [[GEPS:%.+]], {{.+}}getelementptr {{.+}}[2 x i{{.+}}]* [[MTYPE00]]{{.+}}, i8** null, i8** null)
+// CK4-DAG: call void @__tgt_target_data_begin_mapper(%struct.ident_t* @{{.+}}, i64 [[DEV:%[^,]+]], i32 2, i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], {{.+}}getelementptr {{.+}}[2 x i{{.+}}]* [[SIZE00]]{{.+}}, {{.+}}getelementptr {{.+}}[2 x i{{.+}}]* [[MTYPE00]]{{.+}}, i8** null, i8** null)
// CK4-DAG: [[DEV]] = sext i32 [[DEVi32:%[^,]+]] to i64
// CK4-DAG: [[DEVi32]] = load i32, i32* %{{[^,]+}},
// CK4-DAG: [[GEPBP]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[BP:%[^,]+]]
// CK4-DAG: [[GEPP]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[P:%[^,]+]]
-// CK4-DAG: [[GEPS]] = getelementptr inbounds [2 x i[[sz]]], [2 x i[[sz]]]* [[S:%[^,]+]]
// CK4-DAG: [[BP0:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 0
// CK4-DAG: [[P0:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 0
-// CK4-DAG: [[S0:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 0
// CK4-DAG: [[CBP0:%.+]] = bitcast i8** [[BP0]] to [[STT]]**
// CK4-DAG: [[CP0:%.+]] = bitcast i8** [[P0]] to double***
// CK4-DAG: store [[STT]]* [[VAR0:%.+]], [[STT]]** [[CBP0]]
// CK4-DAG: store double** [[SEC0:%.+]], double*** [[CP0]]
-// CK4-DAG: store i[[sz]] {{%.+}}, i[[sz]]* [[S0]]
+// CK4-DAG: store i64 {{%.+}}, i64* getelementptr inbounds ([2 x i64], [2 x i64]* [[SIZE00]], i32 0, i32 0),
// CK4-DAG: [[SEC0]] = getelementptr inbounds {{.*}}[[STT]]* [[VAR0]], i32 0, i32 1
// CK4-DAG: [[BP1:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 1
// CK4-DAG: [[P1:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 1
-// CK4-DAG: [[S1:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 1
// CK4-DAG: [[CBP1:%.+]] = bitcast i8** [[BP1]] to double***
// CK4-DAG: [[CP1:%.+]] = bitcast i8** [[P1]] to double**
// CK4-DAG: store double** [[SEC0]], double*** [[CBP1]]
// CK4-DAG: store double* [[SEC1:%.+]], double** [[CP1]]
-// CK4-DAG: store i[[sz]] 24, i[[sz]]* [[S1]]
// CK4-DAG: [[SEC1]] = getelementptr inbounds {{.*}}double* [[SEC11:%[^,]+]], i{{.+}} 1
// CK4-DAG: [[SEC11]] = load double*, double** [[SEC111:%[^,]+]],
// CK4-DAG: [[SEC111]] = getelementptr inbounds {{.*}}[[STT]]* [[VAR0]], i32 0, i32 1
@@ -524,12 +518,11 @@
// CK4: br i1 %{{[^,]+}}, label %[[IFTHEN:[^,]+]], label %[[IFELSE:[^,]+]]
// CK4: [[IFTHEN]]
-// CK4-DAG: call void @__tgt_target_data_end_mapper(%struct.ident_t* @{{.+}}, i64 [[DEV:%[^,]+]], i32 2, i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], i[[sz]]* [[GEPS:%.+]], {{.+}}getelementptr {{.+}}[2 x i{{.+}}]* [[MTYPE00]]{{.+}}, i8** null, i8** null)
+// CK4-DAG: call void @__tgt_target_data_end_mapper(%struct.ident_t* @{{.+}}, i64 [[DEV:%[^,]+]], i32 2, i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], {{.+}}getelementptr {{.+}}[2 x i{{.+}}]* [[SIZE00]]{{.+}}, {{.+}}getelementptr {{.+}}[2 x i{{.+}}]* [[MTYPE00]]{{.+}}, i8** null, i8** null)
// CK4-DAG: [[DEV]] = sext i32 [[DEVi32:%[^,]+]] to i64
// CK4-DAG: [[DEVi32]] = load i32, i32* %{{[^,]+}},
// CK4-DAG: [[GEPBP]] = getelementptr inbounds {{.+}}[[BP]]
// CK4-DAG: [[GEPP]] = getelementptr inbounds {{.+}}[[P]]
-// CK4-DAG: [[GEPS]] = getelementptr inbounds {{.+}}[[S]]
// CK4: br label %[[IFEND:[^,]+]]
// CK4: [[IFELSE]]
// CK4: br label %[[IFEND]]
diff --git a/clang/test/OpenMP/target_data_map_codegen_hold.cpp b/clang/test/OpenMP/target_data_map_codegen_hold.cpp
--- a/clang/test/OpenMP/target_data_map_codegen_hold.cpp
+++ b/clang/test/OpenMP/target_data_map_codegen_hold.cpp
@@ -1,4 +1,4 @@
-// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --check-globals --global-value-regex ".offload_maptypes.*" ".offload_sizes.*" --global-hex-value-regex ".offload_maptypes.*"
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --check-globals --prefix-filecheck-ir-name _ --global-value-regex ".offload_maptypes.*" ".offload_sizes.*" --global-hex-value-regex ".offload_maptypes.*"
// expected-no-diagnostics
#ifndef HEADER
#define HEADER
@@ -59,7 +59,8 @@
// CHECK-PPC64LE: @.offload_maptypes.2 = private unnamed_addr constant [1 x i64] [i64 [[#0x2405]]]
// CHECK-PPC64LE: @.offload_sizes.3 = private unnamed_addr constant [1 x i64] [i64 4]
// CHECK-PPC64LE: @.offload_maptypes.4 = private unnamed_addr constant [1 x i64] [i64 [[#0x2003]]]
-// CHECK-PPC64LE: @.offload_maptypes.5 = private unnamed_addr constant [11 x i64] [i64 [[#0x2000]], i64 [[#0x1000000002003]], i64 [[#0x1000000002010]], i64 [[#0x2010]], i64 [[#0x2013]], i64 [[#0x3]], i64 [[#0x2000]], i64 [[#0x7000000002003]], i64 [[#0x7000000002010]], i64 [[#0x2010]], i64 [[#0x2013]]]
+// CHECK-PPC64LE: @.offload_sizes.5 = private unnamed_addr global [11 x i64] [i64 0, i64 4, i64 8, i64 8, i64 4, i64 4, i64 0, i64 4, i64 8, i64 8, i64 4]
+// CHECK-PPC64LE: @.offload_maptypes.6 = private unnamed_addr constant [11 x i64] [i64 [[#0x2000]], i64 [[#0x1000000002003]], i64 [[#0x1000000002010]], i64 [[#0x2010]], i64 [[#0x2013]], i64 [[#0x3]], i64 [[#0x2000]], i64 [[#0x7000000002003]], i64 [[#0x7000000002010]], i64 [[#0x2010]], i64 [[#0x2013]]]
//.
// CHECK-I386: @.offload_sizes = private unnamed_addr constant [1 x i64] [i64 20]
// CHECK-I386: @.offload_maptypes = private unnamed_addr constant [1 x i64] [i64 [[#0x2001]]]
@@ -67,7 +68,8 @@
// CHECK-I386: @.offload_maptypes.2 = private unnamed_addr constant [1 x i64] [i64 [[#0x2405]]]
// CHECK-I386: @.offload_sizes.3 = private unnamed_addr constant [1 x i64] [i64 4]
// CHECK-I386: @.offload_maptypes.4 = private unnamed_addr constant [1 x i64] [i64 [[#0x2003]]]
-// CHECK-I386: @.offload_maptypes.5 = private unnamed_addr constant [11 x i64] [i64 [[#0x2000]], i64 [[#0x1000000002003]], i64 [[#0x1000000002010]], i64 [[#0x2010]], i64 [[#0x2013]], i64 [[#0x3]], i64 [[#0x2000]], i64 [[#0x7000000002003]], i64 [[#0x7000000002010]], i64 [[#0x2010]], i64 [[#0x2013]]]
+// CHECK-I386: @.offload_sizes.5 = private unnamed_addr global [11 x i64] [i64 0, i64 4, i64 4, i64 4, i64 4, i64 4, i64 0, i64 4, i64 4, i64 4, i64 4]
+// CHECK-I386: @.offload_maptypes.6 = private unnamed_addr constant [11 x i64] [i64 [[#0x2000]], i64 [[#0x1000000002003]], i64 [[#0x1000000002010]], i64 [[#0x2010]], i64 [[#0x2013]], i64 [[#0x3]], i64 [[#0x2000]], i64 [[#0x7000000002003]], i64 [[#0x7000000002010]], i64 [[#0x2010]], i64 [[#0x2013]]]
//.
// CHECK-PPC64LE-LABEL: @_Z3fooi( // CHECK-PPC64LE-NEXT: entry: @@ -87,7 +89,6 @@ // CHECK-PPC64LE-NEXT: [[DOTOFFLOAD_BASEPTRS29:%.*]] = alloca [11 x i8*], align 8 // CHECK-PPC64LE-NEXT: [[DOTOFFLOAD_PTRS30:%.*]] = alloca [11 x i8*], align 8 // CHECK-PPC64LE-NEXT: [[DOTOFFLOAD_MAPPERS31:%.*]] = alloca [11 x i8*], align 8 -// CHECK-PPC64LE-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [11 x i64], align 8 // CHECK-PPC64LE-NEXT: store i32 [[ARG:%.*]], i32* [[ARG_ADDR]], align 4 // CHECK-PPC64LE-NEXT: [[TMP0:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK-PPC64LE-NEXT: [[TMP1:%.*]] = bitcast i8** [[TMP0]] to [5 x float]** @@ -208,121 +209,99 @@ // CHECK-PPC64LE-NEXT: [[TMP72:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 0 // CHECK-PPC64LE-NEXT: [[TMP73:%.*]] = bitcast i8** [[TMP72]] to %struct.S1** // CHECK-PPC64LE-NEXT: store %struct.S1* [[S]], %struct.S1** [[TMP73]], align 8 -// CHECK-PPC64LE-NEXT: [[TMP74:%.*]] = getelementptr inbounds [11 x i64], [11 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK-PPC64LE-NEXT: store i64 [[TMP49]], i64* [[TMP74]], align 8 -// CHECK-PPC64LE-NEXT: [[TMP75:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 0 -// CHECK-PPC64LE-NEXT: store i8* null, i8** [[TMP75]], align 8 -// CHECK-PPC64LE-NEXT: [[TMP76:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 1 -// CHECK-PPC64LE-NEXT: [[TMP77:%.*]] = bitcast i8** [[TMP76]] to %struct.S2** -// CHECK-PPC64LE-NEXT: store %struct.S2* [[TMP30]], %struct.S2** [[TMP77]], align 8 -// CHECK-PPC64LE-NEXT: [[TMP78:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 1 -// CHECK-PPC64LE-NEXT: [[TMP79:%.*]] = bitcast i8** [[TMP78]] to %struct.S1** -// CHECK-PPC64LE-NEXT: store %struct.S1* [[S]], %struct.S1** [[TMP79]], align 8 -// CHECK-PPC64LE-NEXT: [[TMP80:%.*]] = getelementptr inbounds [11 x i64], [11 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK-PPC64LE-NEXT: store i64 4, i64* [[TMP80]], align 8 -// CHECK-PPC64LE-NEXT: [[TMP81:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 1 -// CHECK-PPC64LE-NEXT: store i8* null, i8** [[TMP81]], align 8 -// CHECK-PPC64LE-NEXT: [[TMP82:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 2 +// CHECK-PPC64LE-NEXT: store i64 [[TMP49]], i64* getelementptr inbounds ([11 x i64], [11 x i64]* @.offload_sizes.5, i32 0, i32 0), align 8 +// CHECK-PPC64LE-NEXT: [[TMP74:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 0 +// CHECK-PPC64LE-NEXT: store i8* null, i8** [[TMP74]], align 8 +// CHECK-PPC64LE-NEXT: [[TMP75:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 1 +// CHECK-PPC64LE-NEXT: [[TMP76:%.*]] = bitcast i8** [[TMP75]] to %struct.S2** +// CHECK-PPC64LE-NEXT: store %struct.S2* [[TMP30]], %struct.S2** [[TMP76]], align 8 +// CHECK-PPC64LE-NEXT: [[TMP77:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 1 +// CHECK-PPC64LE-NEXT: [[TMP78:%.*]] = bitcast i8** [[TMP77]] to %struct.S1** +// CHECK-PPC64LE-NEXT: store %struct.S1* [[S]], %struct.S1** [[TMP78]], align 8 +// CHECK-PPC64LE-NEXT: [[TMP79:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 1 +// CHECK-PPC64LE-NEXT: store i8* null, i8** [[TMP79]], align 8 +// CHECK-PPC64LE-NEXT: 
[[TMP80:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 2 +// CHECK-PPC64LE-NEXT: [[TMP81:%.*]] = bitcast i8** [[TMP80]] to %struct.S2*** +// CHECK-PPC64LE-NEXT: store %struct.S2** [[PS]], %struct.S2*** [[TMP81]], align 8 +// CHECK-PPC64LE-NEXT: [[TMP82:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 2 // CHECK-PPC64LE-NEXT: [[TMP83:%.*]] = bitcast i8** [[TMP82]] to %struct.S2*** -// CHECK-PPC64LE-NEXT: store %struct.S2** [[PS]], %struct.S2*** [[TMP83]], align 8 -// CHECK-PPC64LE-NEXT: [[TMP84:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 2 -// CHECK-PPC64LE-NEXT: [[TMP85:%.*]] = bitcast i8** [[TMP84]] to %struct.S2*** -// CHECK-PPC64LE-NEXT: store %struct.S2** [[PS10]], %struct.S2*** [[TMP85]], align 8 -// CHECK-PPC64LE-NEXT: [[TMP86:%.*]] = getelementptr inbounds [11 x i64], [11 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK-PPC64LE-NEXT: store i64 8, i64* [[TMP86]], align 8 -// CHECK-PPC64LE-NEXT: [[TMP87:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 2 -// CHECK-PPC64LE-NEXT: store i8* null, i8** [[TMP87]], align 8 -// CHECK-PPC64LE-NEXT: [[TMP88:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 3 -// CHECK-PPC64LE-NEXT: [[TMP89:%.*]] = bitcast i8** [[TMP88]] to %struct.S2*** -// CHECK-PPC64LE-NEXT: store %struct.S2** [[PS10]], %struct.S2*** [[TMP89]], align 8 -// CHECK-PPC64LE-NEXT: [[TMP90:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 3 +// CHECK-PPC64LE-NEXT: store %struct.S2** [[PS10]], %struct.S2*** [[TMP83]], align 8 +// CHECK-PPC64LE-NEXT: [[TMP84:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 2 +// CHECK-PPC64LE-NEXT: store i8* null, i8** [[TMP84]], align 8 +// CHECK-PPC64LE-NEXT: [[TMP85:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 3 +// CHECK-PPC64LE-NEXT: [[TMP86:%.*]] = bitcast i8** [[TMP85]] to %struct.S2*** +// CHECK-PPC64LE-NEXT: store %struct.S2** [[PS10]], %struct.S2*** [[TMP86]], align 8 +// CHECK-PPC64LE-NEXT: [[TMP87:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 3 +// CHECK-PPC64LE-NEXT: [[TMP88:%.*]] = bitcast i8** [[TMP87]] to %struct.S2*** +// CHECK-PPC64LE-NEXT: store %struct.S2** [[PS13]], %struct.S2*** [[TMP88]], align 8 +// CHECK-PPC64LE-NEXT: [[TMP89:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 3 +// CHECK-PPC64LE-NEXT: store i8* null, i8** [[TMP89]], align 8 +// CHECK-PPC64LE-NEXT: [[TMP90:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 4 // CHECK-PPC64LE-NEXT: [[TMP91:%.*]] = bitcast i8** [[TMP90]] to %struct.S2*** // CHECK-PPC64LE-NEXT: store %struct.S2** [[PS13]], %struct.S2*** [[TMP91]], align 8 -// CHECK-PPC64LE-NEXT: [[TMP92:%.*]] = getelementptr inbounds [11 x i64], [11 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK-PPC64LE-NEXT: store i64 8, i64* [[TMP92]], align 8 -// CHECK-PPC64LE-NEXT: [[TMP93:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 3 -// CHECK-PPC64LE-NEXT: store i8* null, i8** [[TMP93]], align 8 -// CHECK-PPC64LE-NEXT: [[TMP94:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 4 -// CHECK-PPC64LE-NEXT: [[TMP95:%.*]] = bitcast i8** [[TMP94]] to %struct.S2*** -// 
CHECK-PPC64LE-NEXT: store %struct.S2** [[PS13]], %struct.S2*** [[TMP95]], align 8 -// CHECK-PPC64LE-NEXT: [[TMP96:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 4 -// CHECK-PPC64LE-NEXT: [[TMP97:%.*]] = bitcast i8** [[TMP96]] to %struct.S1** -// CHECK-PPC64LE-NEXT: store %struct.S1* [[S17]], %struct.S1** [[TMP97]], align 8 -// CHECK-PPC64LE-NEXT: [[TMP98:%.*]] = getelementptr inbounds [11 x i64], [11 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK-PPC64LE-NEXT: store i64 4, i64* [[TMP98]], align 8 -// CHECK-PPC64LE-NEXT: [[TMP99:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 4 +// CHECK-PPC64LE-NEXT: [[TMP92:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 4 +// CHECK-PPC64LE-NEXT: [[TMP93:%.*]] = bitcast i8** [[TMP92]] to %struct.S1** +// CHECK-PPC64LE-NEXT: store %struct.S1* [[S17]], %struct.S1** [[TMP93]], align 8 +// CHECK-PPC64LE-NEXT: [[TMP94:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 4 +// CHECK-PPC64LE-NEXT: store i8* null, i8** [[TMP94]], align 8 +// CHECK-PPC64LE-NEXT: [[TMP95:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 5 +// CHECK-PPC64LE-NEXT: [[TMP96:%.*]] = bitcast i8** [[TMP95]] to i32** +// CHECK-PPC64LE-NEXT: store i32* [[ARG_ADDR]], i32** [[TMP96]], align 8 +// CHECK-PPC64LE-NEXT: [[TMP97:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 5 +// CHECK-PPC64LE-NEXT: [[TMP98:%.*]] = bitcast i8** [[TMP97]] to i32** +// CHECK-PPC64LE-NEXT: store i32* [[ARG_ADDR]], i32** [[TMP98]], align 8 +// CHECK-PPC64LE-NEXT: [[TMP99:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 5 // CHECK-PPC64LE-NEXT: store i8* null, i8** [[TMP99]], align 8 -// CHECK-PPC64LE-NEXT: [[TMP100:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 5 -// CHECK-PPC64LE-NEXT: [[TMP101:%.*]] = bitcast i8** [[TMP100]] to i32** -// CHECK-PPC64LE-NEXT: store i32* [[ARG_ADDR]], i32** [[TMP101]], align 8 -// CHECK-PPC64LE-NEXT: [[TMP102:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 5 -// CHECK-PPC64LE-NEXT: [[TMP103:%.*]] = bitcast i8** [[TMP102]] to i32** -// CHECK-PPC64LE-NEXT: store i32* [[ARG_ADDR]], i32** [[TMP103]], align 8 -// CHECK-PPC64LE-NEXT: [[TMP104:%.*]] = getelementptr inbounds [11 x i64], [11 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5 -// CHECK-PPC64LE-NEXT: store i64 4, i64* [[TMP104]], align 8 -// CHECK-PPC64LE-NEXT: [[TMP105:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 5 -// CHECK-PPC64LE-NEXT: store i8* null, i8** [[TMP105]], align 8 -// CHECK-PPC64LE-NEXT: [[TMP106:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 6 -// CHECK-PPC64LE-NEXT: [[TMP107:%.*]] = bitcast i8** [[TMP106]] to %struct.S2** -// CHECK-PPC64LE-NEXT: store %struct.S2* [[TMP50]], %struct.S2** [[TMP107]], align 8 -// CHECK-PPC64LE-NEXT: [[TMP108:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 6 -// CHECK-PPC64LE-NEXT: [[TMP109:%.*]] = bitcast i8** [[TMP108]] to %struct.S1** -// CHECK-PPC64LE-NEXT: store %struct.S1* [[S18]], %struct.S1** [[TMP109]], align 8 -// CHECK-PPC64LE-NEXT: [[TMP110:%.*]] = getelementptr inbounds [11 x i64], [11 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6 -// CHECK-PPC64LE-NEXT: store i64 
[[TMP69]], i64* [[TMP110]], align 8 -// CHECK-PPC64LE-NEXT: [[TMP111:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 6 -// CHECK-PPC64LE-NEXT: store i8* null, i8** [[TMP111]], align 8 -// CHECK-PPC64LE-NEXT: [[TMP112:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 7 -// CHECK-PPC64LE-NEXT: [[TMP113:%.*]] = bitcast i8** [[TMP112]] to %struct.S2** -// CHECK-PPC64LE-NEXT: store %struct.S2* [[TMP50]], %struct.S2** [[TMP113]], align 8 -// CHECK-PPC64LE-NEXT: [[TMP114:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 7 -// CHECK-PPC64LE-NEXT: [[TMP115:%.*]] = bitcast i8** [[TMP114]] to %struct.S1** -// CHECK-PPC64LE-NEXT: store %struct.S1* [[S18]], %struct.S1** [[TMP115]], align 8 -// CHECK-PPC64LE-NEXT: [[TMP116:%.*]] = getelementptr inbounds [11 x i64], [11 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7 -// CHECK-PPC64LE-NEXT: store i64 4, i64* [[TMP116]], align 8 -// CHECK-PPC64LE-NEXT: [[TMP117:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 7 -// CHECK-PPC64LE-NEXT: store i8* null, i8** [[TMP117]], align 8 -// CHECK-PPC64LE-NEXT: [[TMP118:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 8 -// CHECK-PPC64LE-NEXT: [[TMP119:%.*]] = bitcast i8** [[TMP118]] to %struct.S2*** -// CHECK-PPC64LE-NEXT: store %struct.S2** [[PS19]], %struct.S2*** [[TMP119]], align 8 -// CHECK-PPC64LE-NEXT: [[TMP120:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 8 +// CHECK-PPC64LE-NEXT: [[TMP100:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 6 +// CHECK-PPC64LE-NEXT: [[TMP101:%.*]] = bitcast i8** [[TMP100]] to %struct.S2** +// CHECK-PPC64LE-NEXT: store %struct.S2* [[TMP50]], %struct.S2** [[TMP101]], align 8 +// CHECK-PPC64LE-NEXT: [[TMP102:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 6 +// CHECK-PPC64LE-NEXT: [[TMP103:%.*]] = bitcast i8** [[TMP102]] to %struct.S1** +// CHECK-PPC64LE-NEXT: store %struct.S1* [[S18]], %struct.S1** [[TMP103]], align 8 +// CHECK-PPC64LE-NEXT: store i64 [[TMP69]], i64* getelementptr inbounds ([11 x i64], [11 x i64]* @.offload_sizes.5, i32 0, i32 6), align 8 +// CHECK-PPC64LE-NEXT: [[TMP104:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 6 +// CHECK-PPC64LE-NEXT: store i8* null, i8** [[TMP104]], align 8 +// CHECK-PPC64LE-NEXT: [[TMP105:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 7 +// CHECK-PPC64LE-NEXT: [[TMP106:%.*]] = bitcast i8** [[TMP105]] to %struct.S2** +// CHECK-PPC64LE-NEXT: store %struct.S2* [[TMP50]], %struct.S2** [[TMP106]], align 8 +// CHECK-PPC64LE-NEXT: [[TMP107:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 7 +// CHECK-PPC64LE-NEXT: [[TMP108:%.*]] = bitcast i8** [[TMP107]] to %struct.S1** +// CHECK-PPC64LE-NEXT: store %struct.S1* [[S18]], %struct.S1** [[TMP108]], align 8 +// CHECK-PPC64LE-NEXT: [[TMP109:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 7 +// CHECK-PPC64LE-NEXT: store i8* null, i8** [[TMP109]], align 8 +// CHECK-PPC64LE-NEXT: [[TMP110:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 8 +// CHECK-PPC64LE-NEXT: [[TMP111:%.*]] = bitcast i8** [[TMP110]] to %struct.S2*** +// CHECK-PPC64LE-NEXT: store %struct.S2** 
[[PS19]], %struct.S2*** [[TMP111]], align 8 +// CHECK-PPC64LE-NEXT: [[TMP112:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 8 +// CHECK-PPC64LE-NEXT: [[TMP113:%.*]] = bitcast i8** [[TMP112]] to %struct.S2*** +// CHECK-PPC64LE-NEXT: store %struct.S2** [[PS21]], %struct.S2*** [[TMP113]], align 8 +// CHECK-PPC64LE-NEXT: [[TMP114:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 8 +// CHECK-PPC64LE-NEXT: store i8* null, i8** [[TMP114]], align 8 +// CHECK-PPC64LE-NEXT: [[TMP115:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 9 +// CHECK-PPC64LE-NEXT: [[TMP116:%.*]] = bitcast i8** [[TMP115]] to %struct.S2*** +// CHECK-PPC64LE-NEXT: store %struct.S2** [[PS21]], %struct.S2*** [[TMP116]], align 8 +// CHECK-PPC64LE-NEXT: [[TMP117:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 9 +// CHECK-PPC64LE-NEXT: [[TMP118:%.*]] = bitcast i8** [[TMP117]] to %struct.S2*** +// CHECK-PPC64LE-NEXT: store %struct.S2** [[PS24]], %struct.S2*** [[TMP118]], align 8 +// CHECK-PPC64LE-NEXT: [[TMP119:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 9 +// CHECK-PPC64LE-NEXT: store i8* null, i8** [[TMP119]], align 8 +// CHECK-PPC64LE-NEXT: [[TMP120:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 10 // CHECK-PPC64LE-NEXT: [[TMP121:%.*]] = bitcast i8** [[TMP120]] to %struct.S2*** -// CHECK-PPC64LE-NEXT: store %struct.S2** [[PS21]], %struct.S2*** [[TMP121]], align 8 -// CHECK-PPC64LE-NEXT: [[TMP122:%.*]] = getelementptr inbounds [11 x i64], [11 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 8 -// CHECK-PPC64LE-NEXT: store i64 8, i64* [[TMP122]], align 8 -// CHECK-PPC64LE-NEXT: [[TMP123:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 8 -// CHECK-PPC64LE-NEXT: store i8* null, i8** [[TMP123]], align 8 -// CHECK-PPC64LE-NEXT: [[TMP124:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 9 -// CHECK-PPC64LE-NEXT: [[TMP125:%.*]] = bitcast i8** [[TMP124]] to %struct.S2*** -// CHECK-PPC64LE-NEXT: store %struct.S2** [[PS21]], %struct.S2*** [[TMP125]], align 8 -// CHECK-PPC64LE-NEXT: [[TMP126:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 9 -// CHECK-PPC64LE-NEXT: [[TMP127:%.*]] = bitcast i8** [[TMP126]] to %struct.S2*** -// CHECK-PPC64LE-NEXT: store %struct.S2** [[PS24]], %struct.S2*** [[TMP127]], align 8 -// CHECK-PPC64LE-NEXT: [[TMP128:%.*]] = getelementptr inbounds [11 x i64], [11 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 9 -// CHECK-PPC64LE-NEXT: store i64 8, i64* [[TMP128]], align 8 -// CHECK-PPC64LE-NEXT: [[TMP129:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 9 -// CHECK-PPC64LE-NEXT: store i8* null, i8** [[TMP129]], align 8 -// CHECK-PPC64LE-NEXT: [[TMP130:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 10 -// CHECK-PPC64LE-NEXT: [[TMP131:%.*]] = bitcast i8** [[TMP130]] to %struct.S2*** -// CHECK-PPC64LE-NEXT: store %struct.S2** [[PS24]], %struct.S2*** [[TMP131]], align 8 -// CHECK-PPC64LE-NEXT: [[TMP132:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 10 -// CHECK-PPC64LE-NEXT: [[TMP133:%.*]] = bitcast i8** [[TMP132]] to %struct.S1** -// CHECK-PPC64LE-NEXT: store %struct.S1* [[S28]], %struct.S1** [[TMP133]], align 8 -// 
CHECK-PPC64LE-NEXT: [[TMP134:%.*]] = getelementptr inbounds [11 x i64], [11 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 10 -// CHECK-PPC64LE-NEXT: store i64 4, i64* [[TMP134]], align 8 -// CHECK-PPC64LE-NEXT: [[TMP135:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 10 -// CHECK-PPC64LE-NEXT: store i8* null, i8** [[TMP135]], align 8 -// CHECK-PPC64LE-NEXT: [[TMP136:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 0 -// CHECK-PPC64LE-NEXT: [[TMP137:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 0 -// CHECK-PPC64LE-NEXT: [[TMP138:%.*]] = getelementptr inbounds [11 x i64], [11 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK-PPC64LE-NEXT: call void @__tgt_target_data_begin_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i32 11, i8** [[TMP136]], i8** [[TMP137]], i64* [[TMP138]], i64* getelementptr inbounds ([11 x i64], [11 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null) -// CHECK-PPC64LE-NEXT: [[TMP139:%.*]] = load i32, i32* [[ARG_ADDR]], align 4 -// CHECK-PPC64LE-NEXT: [[INC32:%.*]] = add nsw i32 [[TMP139]], 1 +// CHECK-PPC64LE-NEXT: store %struct.S2** [[PS24]], %struct.S2*** [[TMP121]], align 8 +// CHECK-PPC64LE-NEXT: [[TMP122:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 10 +// CHECK-PPC64LE-NEXT: [[TMP123:%.*]] = bitcast i8** [[TMP122]] to %struct.S1** +// CHECK-PPC64LE-NEXT: store %struct.S1* [[S28]], %struct.S1** [[TMP123]], align 8 +// CHECK-PPC64LE-NEXT: [[TMP124:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 10 +// CHECK-PPC64LE-NEXT: store i8* null, i8** [[TMP124]], align 8 +// CHECK-PPC64LE-NEXT: [[TMP125:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 0 +// CHECK-PPC64LE-NEXT: [[TMP126:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 0 +// CHECK-PPC64LE-NEXT: call void @__tgt_target_data_begin_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i32 11, i8** [[TMP125]], i8** [[TMP126]], i64* getelementptr inbounds ([11 x i64], [11 x i64]* @.offload_sizes.5, i32 0, i32 0), i64* getelementptr inbounds ([11 x i64], [11 x i64]* @.offload_maptypes.6, i32 0, i32 0), i8** null, i8** null) +// CHECK-PPC64LE-NEXT: [[TMP127:%.*]] = load i32, i32* [[ARG_ADDR]], align 4 +// CHECK-PPC64LE-NEXT: [[INC32:%.*]] = add nsw i32 [[TMP127]], 1 // CHECK-PPC64LE-NEXT: store i32 [[INC32]], i32* [[ARG_ADDR]], align 4 -// CHECK-PPC64LE-NEXT: [[TMP140:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 0 -// CHECK-PPC64LE-NEXT: [[TMP141:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 0 -// CHECK-PPC64LE-NEXT: [[TMP142:%.*]] = getelementptr inbounds [11 x i64], [11 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK-PPC64LE-NEXT: call void @__tgt_target_data_end_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i32 11, i8** [[TMP140]], i8** [[TMP141]], i64* [[TMP142]], i64* getelementptr inbounds ([11 x i64], [11 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null) +// CHECK-PPC64LE-NEXT: [[TMP128:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 0 +// CHECK-PPC64LE-NEXT: [[TMP129:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 0 +// CHECK-PPC64LE-NEXT: call void @__tgt_target_data_end_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i32 
11, i8** [[TMP128]], i8** [[TMP129]], i64* getelementptr inbounds ([11 x i64], [11 x i64]* @.offload_sizes.5, i32 0, i32 0), i64* getelementptr inbounds ([11 x i64], [11 x i64]* @.offload_maptypes.6, i32 0, i32 0), i8** null, i8** null) // CHECK-PPC64LE-NEXT: ret void // // CHECK-I386-LABEL: @_Z3fooi( @@ -343,7 +322,6 @@ // CHECK-I386-NEXT: [[DOTOFFLOAD_BASEPTRS29:%.*]] = alloca [11 x i8*], align 4 // CHECK-I386-NEXT: [[DOTOFFLOAD_PTRS30:%.*]] = alloca [11 x i8*], align 4 // CHECK-I386-NEXT: [[DOTOFFLOAD_MAPPERS31:%.*]] = alloca [11 x i8*], align 4 -// CHECK-I386-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [11 x i64], align 4 // CHECK-I386-NEXT: store i32 [[ARG:%.*]], i32* [[ARG_ADDR]], align 4 // CHECK-I386-NEXT: [[TMP0:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK-I386-NEXT: [[TMP1:%.*]] = bitcast i8** [[TMP0]] to [5 x float]** @@ -464,121 +442,99 @@ // CHECK-I386-NEXT: [[TMP72:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 0 // CHECK-I386-NEXT: [[TMP73:%.*]] = bitcast i8** [[TMP72]] to %struct.S1** // CHECK-I386-NEXT: store %struct.S1* [[S]], %struct.S1** [[TMP73]], align 4 -// CHECK-I386-NEXT: [[TMP74:%.*]] = getelementptr inbounds [11 x i64], [11 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK-I386-NEXT: store i64 [[TMP49]], i64* [[TMP74]], align 4 -// CHECK-I386-NEXT: [[TMP75:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i32 0, i32 0 -// CHECK-I386-NEXT: store i8* null, i8** [[TMP75]], align 4 -// CHECK-I386-NEXT: [[TMP76:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 1 -// CHECK-I386-NEXT: [[TMP77:%.*]] = bitcast i8** [[TMP76]] to %struct.S2** -// CHECK-I386-NEXT: store %struct.S2* [[TMP30]], %struct.S2** [[TMP77]], align 4 -// CHECK-I386-NEXT: [[TMP78:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 1 -// CHECK-I386-NEXT: [[TMP79:%.*]] = bitcast i8** [[TMP78]] to %struct.S1** -// CHECK-I386-NEXT: store %struct.S1* [[S]], %struct.S1** [[TMP79]], align 4 -// CHECK-I386-NEXT: [[TMP80:%.*]] = getelementptr inbounds [11 x i64], [11 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK-I386-NEXT: store i64 4, i64* [[TMP80]], align 4 -// CHECK-I386-NEXT: [[TMP81:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i32 0, i32 1 -// CHECK-I386-NEXT: store i8* null, i8** [[TMP81]], align 4 -// CHECK-I386-NEXT: [[TMP82:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 2 +// CHECK-I386-NEXT: store i64 [[TMP49]], i64* getelementptr inbounds ([11 x i64], [11 x i64]* @.offload_sizes.5, i32 0, i32 0), align 4 +// CHECK-I386-NEXT: [[TMP74:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i32 0, i32 0 +// CHECK-I386-NEXT: store i8* null, i8** [[TMP74]], align 4 +// CHECK-I386-NEXT: [[TMP75:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 1 +// CHECK-I386-NEXT: [[TMP76:%.*]] = bitcast i8** [[TMP75]] to %struct.S2** +// CHECK-I386-NEXT: store %struct.S2* [[TMP30]], %struct.S2** [[TMP76]], align 4 +// CHECK-I386-NEXT: [[TMP77:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 1 +// CHECK-I386-NEXT: [[TMP78:%.*]] = bitcast i8** [[TMP77]] to %struct.S1** +// CHECK-I386-NEXT: store %struct.S1* [[S]], %struct.S1** [[TMP78]], align 4 +// CHECK-I386-NEXT: [[TMP79:%.*]] = getelementptr inbounds [11 x i8*], [11 x 
i8*]* [[DOTOFFLOAD_MAPPERS31]], i32 0, i32 1 +// CHECK-I386-NEXT: store i8* null, i8** [[TMP79]], align 4 +// CHECK-I386-NEXT: [[TMP80:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 2 +// CHECK-I386-NEXT: [[TMP81:%.*]] = bitcast i8** [[TMP80]] to %struct.S2*** +// CHECK-I386-NEXT: store %struct.S2** [[PS]], %struct.S2*** [[TMP81]], align 4 +// CHECK-I386-NEXT: [[TMP82:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 2 // CHECK-I386-NEXT: [[TMP83:%.*]] = bitcast i8** [[TMP82]] to %struct.S2*** -// CHECK-I386-NEXT: store %struct.S2** [[PS]], %struct.S2*** [[TMP83]], align 4 -// CHECK-I386-NEXT: [[TMP84:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 2 -// CHECK-I386-NEXT: [[TMP85:%.*]] = bitcast i8** [[TMP84]] to %struct.S2*** -// CHECK-I386-NEXT: store %struct.S2** [[PS10]], %struct.S2*** [[TMP85]], align 4 -// CHECK-I386-NEXT: [[TMP86:%.*]] = getelementptr inbounds [11 x i64], [11 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK-I386-NEXT: store i64 4, i64* [[TMP86]], align 4 -// CHECK-I386-NEXT: [[TMP87:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i32 0, i32 2 -// CHECK-I386-NEXT: store i8* null, i8** [[TMP87]], align 4 -// CHECK-I386-NEXT: [[TMP88:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 3 -// CHECK-I386-NEXT: [[TMP89:%.*]] = bitcast i8** [[TMP88]] to %struct.S2*** -// CHECK-I386-NEXT: store %struct.S2** [[PS10]], %struct.S2*** [[TMP89]], align 4 -// CHECK-I386-NEXT: [[TMP90:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 3 +// CHECK-I386-NEXT: store %struct.S2** [[PS10]], %struct.S2*** [[TMP83]], align 4 +// CHECK-I386-NEXT: [[TMP84:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i32 0, i32 2 +// CHECK-I386-NEXT: store i8* null, i8** [[TMP84]], align 4 +// CHECK-I386-NEXT: [[TMP85:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 3 +// CHECK-I386-NEXT: [[TMP86:%.*]] = bitcast i8** [[TMP85]] to %struct.S2*** +// CHECK-I386-NEXT: store %struct.S2** [[PS10]], %struct.S2*** [[TMP86]], align 4 +// CHECK-I386-NEXT: [[TMP87:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 3 +// CHECK-I386-NEXT: [[TMP88:%.*]] = bitcast i8** [[TMP87]] to %struct.S2*** +// CHECK-I386-NEXT: store %struct.S2** [[PS13]], %struct.S2*** [[TMP88]], align 4 +// CHECK-I386-NEXT: [[TMP89:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i32 0, i32 3 +// CHECK-I386-NEXT: store i8* null, i8** [[TMP89]], align 4 +// CHECK-I386-NEXT: [[TMP90:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 4 // CHECK-I386-NEXT: [[TMP91:%.*]] = bitcast i8** [[TMP90]] to %struct.S2*** // CHECK-I386-NEXT: store %struct.S2** [[PS13]], %struct.S2*** [[TMP91]], align 4 -// CHECK-I386-NEXT: [[TMP92:%.*]] = getelementptr inbounds [11 x i64], [11 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK-I386-NEXT: store i64 4, i64* [[TMP92]], align 4 -// CHECK-I386-NEXT: [[TMP93:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i32 0, i32 3 -// CHECK-I386-NEXT: store i8* null, i8** [[TMP93]], align 4 -// CHECK-I386-NEXT: [[TMP94:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 4 -// CHECK-I386-NEXT: [[TMP95:%.*]] = bitcast i8** [[TMP94]] to 
%struct.S2*** -// CHECK-I386-NEXT: store %struct.S2** [[PS13]], %struct.S2*** [[TMP95]], align 4 -// CHECK-I386-NEXT: [[TMP96:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 4 -// CHECK-I386-NEXT: [[TMP97:%.*]] = bitcast i8** [[TMP96]] to %struct.S1** -// CHECK-I386-NEXT: store %struct.S1* [[S17]], %struct.S1** [[TMP97]], align 4 -// CHECK-I386-NEXT: [[TMP98:%.*]] = getelementptr inbounds [11 x i64], [11 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK-I386-NEXT: store i64 4, i64* [[TMP98]], align 4 -// CHECK-I386-NEXT: [[TMP99:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i32 0, i32 4 +// CHECK-I386-NEXT: [[TMP92:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 4 +// CHECK-I386-NEXT: [[TMP93:%.*]] = bitcast i8** [[TMP92]] to %struct.S1** +// CHECK-I386-NEXT: store %struct.S1* [[S17]], %struct.S1** [[TMP93]], align 4 +// CHECK-I386-NEXT: [[TMP94:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i32 0, i32 4 +// CHECK-I386-NEXT: store i8* null, i8** [[TMP94]], align 4 +// CHECK-I386-NEXT: [[TMP95:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 5 +// CHECK-I386-NEXT: [[TMP96:%.*]] = bitcast i8** [[TMP95]] to i32** +// CHECK-I386-NEXT: store i32* [[ARG_ADDR]], i32** [[TMP96]], align 4 +// CHECK-I386-NEXT: [[TMP97:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 5 +// CHECK-I386-NEXT: [[TMP98:%.*]] = bitcast i8** [[TMP97]] to i32** +// CHECK-I386-NEXT: store i32* [[ARG_ADDR]], i32** [[TMP98]], align 4 +// CHECK-I386-NEXT: [[TMP99:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i32 0, i32 5 // CHECK-I386-NEXT: store i8* null, i8** [[TMP99]], align 4 -// CHECK-I386-NEXT: [[TMP100:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 5 -// CHECK-I386-NEXT: [[TMP101:%.*]] = bitcast i8** [[TMP100]] to i32** -// CHECK-I386-NEXT: store i32* [[ARG_ADDR]], i32** [[TMP101]], align 4 -// CHECK-I386-NEXT: [[TMP102:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 5 -// CHECK-I386-NEXT: [[TMP103:%.*]] = bitcast i8** [[TMP102]] to i32** -// CHECK-I386-NEXT: store i32* [[ARG_ADDR]], i32** [[TMP103]], align 4 -// CHECK-I386-NEXT: [[TMP104:%.*]] = getelementptr inbounds [11 x i64], [11 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5 -// CHECK-I386-NEXT: store i64 4, i64* [[TMP104]], align 4 -// CHECK-I386-NEXT: [[TMP105:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i32 0, i32 5 -// CHECK-I386-NEXT: store i8* null, i8** [[TMP105]], align 4 -// CHECK-I386-NEXT: [[TMP106:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 6 -// CHECK-I386-NEXT: [[TMP107:%.*]] = bitcast i8** [[TMP106]] to %struct.S2** -// CHECK-I386-NEXT: store %struct.S2* [[TMP50]], %struct.S2** [[TMP107]], align 4 -// CHECK-I386-NEXT: [[TMP108:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 6 -// CHECK-I386-NEXT: [[TMP109:%.*]] = bitcast i8** [[TMP108]] to %struct.S1** -// CHECK-I386-NEXT: store %struct.S1* [[S18]], %struct.S1** [[TMP109]], align 4 -// CHECK-I386-NEXT: [[TMP110:%.*]] = getelementptr inbounds [11 x i64], [11 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6 -// CHECK-I386-NEXT: store i64 [[TMP69]], i64* [[TMP110]], align 4 -// CHECK-I386-NEXT: [[TMP111:%.*]] = getelementptr inbounds [11 
x i8*], [11 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i32 0, i32 6 -// CHECK-I386-NEXT: store i8* null, i8** [[TMP111]], align 4 -// CHECK-I386-NEXT: [[TMP112:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 7 -// CHECK-I386-NEXT: [[TMP113:%.*]] = bitcast i8** [[TMP112]] to %struct.S2** -// CHECK-I386-NEXT: store %struct.S2* [[TMP50]], %struct.S2** [[TMP113]], align 4 -// CHECK-I386-NEXT: [[TMP114:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 7 -// CHECK-I386-NEXT: [[TMP115:%.*]] = bitcast i8** [[TMP114]] to %struct.S1** -// CHECK-I386-NEXT: store %struct.S1* [[S18]], %struct.S1** [[TMP115]], align 4 -// CHECK-I386-NEXT: [[TMP116:%.*]] = getelementptr inbounds [11 x i64], [11 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7 -// CHECK-I386-NEXT: store i64 4, i64* [[TMP116]], align 4 -// CHECK-I386-NEXT: [[TMP117:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i32 0, i32 7 -// CHECK-I386-NEXT: store i8* null, i8** [[TMP117]], align 4 -// CHECK-I386-NEXT: [[TMP118:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 8 -// CHECK-I386-NEXT: [[TMP119:%.*]] = bitcast i8** [[TMP118]] to %struct.S2*** -// CHECK-I386-NEXT: store %struct.S2** [[PS19]], %struct.S2*** [[TMP119]], align 4 -// CHECK-I386-NEXT: [[TMP120:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 8 +// CHECK-I386-NEXT: [[TMP100:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 6 +// CHECK-I386-NEXT: [[TMP101:%.*]] = bitcast i8** [[TMP100]] to %struct.S2** +// CHECK-I386-NEXT: store %struct.S2* [[TMP50]], %struct.S2** [[TMP101]], align 4 +// CHECK-I386-NEXT: [[TMP102:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 6 +// CHECK-I386-NEXT: [[TMP103:%.*]] = bitcast i8** [[TMP102]] to %struct.S1** +// CHECK-I386-NEXT: store %struct.S1* [[S18]], %struct.S1** [[TMP103]], align 4 +// CHECK-I386-NEXT: store i64 [[TMP69]], i64* getelementptr inbounds ([11 x i64], [11 x i64]* @.offload_sizes.5, i32 0, i32 6), align 4 +// CHECK-I386-NEXT: [[TMP104:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i32 0, i32 6 +// CHECK-I386-NEXT: store i8* null, i8** [[TMP104]], align 4 +// CHECK-I386-NEXT: [[TMP105:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 7 +// CHECK-I386-NEXT: [[TMP106:%.*]] = bitcast i8** [[TMP105]] to %struct.S2** +// CHECK-I386-NEXT: store %struct.S2* [[TMP50]], %struct.S2** [[TMP106]], align 4 +// CHECK-I386-NEXT: [[TMP107:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 7 +// CHECK-I386-NEXT: [[TMP108:%.*]] = bitcast i8** [[TMP107]] to %struct.S1** +// CHECK-I386-NEXT: store %struct.S1* [[S18]], %struct.S1** [[TMP108]], align 4 +// CHECK-I386-NEXT: [[TMP109:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i32 0, i32 7 +// CHECK-I386-NEXT: store i8* null, i8** [[TMP109]], align 4 +// CHECK-I386-NEXT: [[TMP110:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 8 +// CHECK-I386-NEXT: [[TMP111:%.*]] = bitcast i8** [[TMP110]] to %struct.S2*** +// CHECK-I386-NEXT: store %struct.S2** [[PS19]], %struct.S2*** [[TMP111]], align 4 +// CHECK-I386-NEXT: [[TMP112:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 8 +// CHECK-I386-NEXT: [[TMP113:%.*]] = 
bitcast i8** [[TMP112]] to %struct.S2*** +// CHECK-I386-NEXT: store %struct.S2** [[PS21]], %struct.S2*** [[TMP113]], align 4 +// CHECK-I386-NEXT: [[TMP114:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i32 0, i32 8 +// CHECK-I386-NEXT: store i8* null, i8** [[TMP114]], align 4 +// CHECK-I386-NEXT: [[TMP115:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 9 +// CHECK-I386-NEXT: [[TMP116:%.*]] = bitcast i8** [[TMP115]] to %struct.S2*** +// CHECK-I386-NEXT: store %struct.S2** [[PS21]], %struct.S2*** [[TMP116]], align 4 +// CHECK-I386-NEXT: [[TMP117:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 9 +// CHECK-I386-NEXT: [[TMP118:%.*]] = bitcast i8** [[TMP117]] to %struct.S2*** +// CHECK-I386-NEXT: store %struct.S2** [[PS24]], %struct.S2*** [[TMP118]], align 4 +// CHECK-I386-NEXT: [[TMP119:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i32 0, i32 9 +// CHECK-I386-NEXT: store i8* null, i8** [[TMP119]], align 4 +// CHECK-I386-NEXT: [[TMP120:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 10 // CHECK-I386-NEXT: [[TMP121:%.*]] = bitcast i8** [[TMP120]] to %struct.S2*** -// CHECK-I386-NEXT: store %struct.S2** [[PS21]], %struct.S2*** [[TMP121]], align 4 -// CHECK-I386-NEXT: [[TMP122:%.*]] = getelementptr inbounds [11 x i64], [11 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 8 -// CHECK-I386-NEXT: store i64 4, i64* [[TMP122]], align 4 -// CHECK-I386-NEXT: [[TMP123:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i32 0, i32 8 -// CHECK-I386-NEXT: store i8* null, i8** [[TMP123]], align 4 -// CHECK-I386-NEXT: [[TMP124:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 9 -// CHECK-I386-NEXT: [[TMP125:%.*]] = bitcast i8** [[TMP124]] to %struct.S2*** -// CHECK-I386-NEXT: store %struct.S2** [[PS21]], %struct.S2*** [[TMP125]], align 4 -// CHECK-I386-NEXT: [[TMP126:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 9 -// CHECK-I386-NEXT: [[TMP127:%.*]] = bitcast i8** [[TMP126]] to %struct.S2*** -// CHECK-I386-NEXT: store %struct.S2** [[PS24]], %struct.S2*** [[TMP127]], align 4 -// CHECK-I386-NEXT: [[TMP128:%.*]] = getelementptr inbounds [11 x i64], [11 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 9 -// CHECK-I386-NEXT: store i64 4, i64* [[TMP128]], align 4 -// CHECK-I386-NEXT: [[TMP129:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i32 0, i32 9 -// CHECK-I386-NEXT: store i8* null, i8** [[TMP129]], align 4 -// CHECK-I386-NEXT: [[TMP130:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 10 -// CHECK-I386-NEXT: [[TMP131:%.*]] = bitcast i8** [[TMP130]] to %struct.S2*** -// CHECK-I386-NEXT: store %struct.S2** [[PS24]], %struct.S2*** [[TMP131]], align 4 -// CHECK-I386-NEXT: [[TMP132:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 10 -// CHECK-I386-NEXT: [[TMP133:%.*]] = bitcast i8** [[TMP132]] to %struct.S1** -// CHECK-I386-NEXT: store %struct.S1* [[S28]], %struct.S1** [[TMP133]], align 4 -// CHECK-I386-NEXT: [[TMP134:%.*]] = getelementptr inbounds [11 x i64], [11 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 10 -// CHECK-I386-NEXT: store i64 4, i64* [[TMP134]], align 4 -// CHECK-I386-NEXT: [[TMP135:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i32 0, i32 10 -// CHECK-I386-NEXT: 
store i8* null, i8** [[TMP135]], align 4 -// CHECK-I386-NEXT: [[TMP136:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 0 -// CHECK-I386-NEXT: [[TMP137:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 0 -// CHECK-I386-NEXT: [[TMP138:%.*]] = getelementptr inbounds [11 x i64], [11 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK-I386-NEXT: call void @__tgt_target_data_begin_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i32 11, i8** [[TMP136]], i8** [[TMP137]], i64* [[TMP138]], i64* getelementptr inbounds ([11 x i64], [11 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null) -// CHECK-I386-NEXT: [[TMP139:%.*]] = load i32, i32* [[ARG_ADDR]], align 4 -// CHECK-I386-NEXT: [[INC32:%.*]] = add nsw i32 [[TMP139]], 1 +// CHECK-I386-NEXT: store %struct.S2** [[PS24]], %struct.S2*** [[TMP121]], align 4 +// CHECK-I386-NEXT: [[TMP122:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 10 +// CHECK-I386-NEXT: [[TMP123:%.*]] = bitcast i8** [[TMP122]] to %struct.S1** +// CHECK-I386-NEXT: store %struct.S1* [[S28]], %struct.S1** [[TMP123]], align 4 +// CHECK-I386-NEXT: [[TMP124:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i32 0, i32 10 +// CHECK-I386-NEXT: store i8* null, i8** [[TMP124]], align 4 +// CHECK-I386-NEXT: [[TMP125:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 0 +// CHECK-I386-NEXT: [[TMP126:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 0 +// CHECK-I386-NEXT: call void @__tgt_target_data_begin_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i32 11, i8** [[TMP125]], i8** [[TMP126]], i64* getelementptr inbounds ([11 x i64], [11 x i64]* @.offload_sizes.5, i32 0, i32 0), i64* getelementptr inbounds ([11 x i64], [11 x i64]* @.offload_maptypes.6, i32 0, i32 0), i8** null, i8** null) +// CHECK-I386-NEXT: [[TMP127:%.*]] = load i32, i32* [[ARG_ADDR]], align 4 +// CHECK-I386-NEXT: [[INC32:%.*]] = add nsw i32 [[TMP127]], 1 // CHECK-I386-NEXT: store i32 [[INC32]], i32* [[ARG_ADDR]], align 4 -// CHECK-I386-NEXT: [[TMP140:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 0 -// CHECK-I386-NEXT: [[TMP141:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 0 -// CHECK-I386-NEXT: [[TMP142:%.*]] = getelementptr inbounds [11 x i64], [11 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK-I386-NEXT: call void @__tgt_target_data_end_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i32 11, i8** [[TMP140]], i8** [[TMP141]], i64* [[TMP142]], i64* getelementptr inbounds ([11 x i64], [11 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null) +// CHECK-I386-NEXT: [[TMP128:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 0 +// CHECK-I386-NEXT: [[TMP129:%.*]] = getelementptr inbounds [11 x i8*], [11 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 0 +// CHECK-I386-NEXT: call void @__tgt_target_data_end_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i32 11, i8** [[TMP128]], i8** [[TMP129]], i64* getelementptr inbounds ([11 x i64], [11 x i64]* @.offload_sizes.5, i32 0, i32 0), i64* getelementptr inbounds ([11 x i64], [11 x i64]* @.offload_maptypes.6, i32 0, i32 0), i8** null, i8** null) // CHECK-I386-NEXT: ret void // void foo(int arg) { diff --git a/clang/test/OpenMP/target_data_map_pointer_array_subscript_codegen.cpp 
b/clang/test/OpenMP/target_data_map_pointer_array_subscript_codegen.cpp --- a/clang/test/OpenMP/target_data_map_pointer_array_subscript_codegen.cpp +++ b/clang/test/OpenMP/target_data_map_pointer_array_subscript_codegen.cpp @@ -35,6 +35,7 @@ // CHECK-DAG: [[SIZES0:@.+]] = private unnamed_addr constant [1 x i64] [i64 {{8|4}}] // CHECK-DAG: [[MAPS0:@.+]] = private unnamed_addr constant [1 x i64] [i64 17] +// CHECK-DAG: [[SIZES1:@.+]] = private unnamed_addr global [2 x i64] [i64 0, i64 4] // CHECK-DAG: [[MAPS1:@.+]] = private unnamed_addr constant [2 x i64] [i64 0, i64 281474976710673] // CHECK: @main int main(void) { @@ -47,7 +48,7 @@ // CHECK: [[BPTR0:%.+]] = getelementptr inbounds [2 x i8*], [2 x i8*]* %{{.+}}, i32 0, i32 0 // CHECK: [[BPTRC0:%.+]] = bitcast i8** [[BPTR0]] to %struct.MyObject** // CHECK: store %struct.MyObject* [[OBJ]], %struct.MyObject** [[BPTRC0]], -// CHECK: call void @__tgt_target_data_begin_mapper(%struct.ident_t* @{{.+}}, i64 -1, i32 2, i8** %{{.+}}, i8** %{{.+}}, i64* %{{.+}}, i64* getelementptr inbounds ([2 x i64], [2 x i64]* [[MAPS1]], i32 0, i32 0), i8** null, i8** null) +// CHECK: call void @__tgt_target_data_begin_mapper(%struct.ident_t* @{{.+}}, i64 -1, i32 2, i8** %{{.+}}, i8** %{{.+}}, i64* getelementptr inbounds ([2 x i64], [2 x i64]* [[SIZES1]], i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* [[MAPS1]], i32 0, i32 0), i8** null, i8** null) #pragma omp target enter data map(to : objects[1].arr [0:1]) return 0; diff --git a/clang/test/OpenMP/target_data_use_device_addr_codegen.cpp b/clang/test/OpenMP/target_data_use_device_addr_codegen.cpp --- a/clang/test/OpenMP/target_data_use_device_addr_codegen.cpp +++ b/clang/test/OpenMP/target_data_use_device_addr_codegen.cpp @@ -14,6 +14,7 @@ // CHECK-DAG: [[SIZES1:@.+]] = private unnamed_addr constant [5 x i64] zeroinitializer // 64 = 0x40 = OMP_MAP_RETURN_PARAM // CHECK-DAG: [[MAPTYPES1:@.+]] = private unnamed_addr constant [5 x i64] [i64 64, i64 64, i64 64, i64 64, i64 64] +// CHECK-DAG: [[SIZES2:@.+]] = private unnamed_addr global [5 x i64] zeroinitializer // 0 = OMP_MAP_NONE // 281474976710720 = 0x1000000000040 = OMP_MAP_MEMBER_OF | OMP_MAP_RETURN_PARAM // CHECK-DAG: [[MAPTYPES2:@.+]] = private unnamed_addr constant [5 x i64] [i64 0, i64 281474976710720, i64 281474976710720, i64 281474976710720, i64 281474976710720] @@ -119,7 +120,6 @@ // %this.addr = alloca %struct.S*, align 8 // CHECK: [[BPTRS:%.+]] = alloca [5 x i8*], // CHECK: [[PTRS:%.+]] = alloca [5 x i8*], -// CHECK: [[SIZES:%.+]] = alloca [5 x i64], // %tmp = alloca i32*, align 8 // %tmp6 = alloca i32**, align 8 // %tmp7 = alloca i32*, align 8 @@ -150,44 +150,34 @@ // CHECK: [[PTR0:%.+]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[PTRS]], i32 0, i32 0 // CHECK: [[PTR0_BEGIN:%.+]] = bitcast i8** [[PTR0]] to i32** // CHECK: store i32* [[A_ADDR]], i32** [[PTR0_BEGIN]], -// CHECK: [[SIZE0:%.+]] = getelementptr inbounds [5 x i64], [5 x i64]* [[SIZES]], i32 0, i32 0 -// CHECK: store i64 [[SZ]], i64* [[SIZE0]], +// CHECK: store i64 [[SZ]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* [[SIZES2]], i32 0, i32 0), // CHECK: [[BPTR1:%.+]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[BPTRS]], i32 0, i32 1 // CHECK: [[BPTR1_A_ADDR:%.+]] = bitcast i8** [[BPTR1]] to i32** // CHECK: store i32* [[A_ADDR2]], i32** [[BPTR1_A_ADDR]], // CHECK: [[PTR1:%.+]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[PTRS]], i32 0, i32 1 // CHECK: [[PTR1_A_ADDR:%.+]] = bitcast i8** [[PTR1]] to i32** // CHECK: store i32* [[A_ADDR2]], i32** [[PTR1_A_ADDR]], 
-// CHECK: [[SIZE1:%.+]] = getelementptr inbounds [5 x i64], [5 x i64]* [[SIZES]], i32 0, i32 1 -// CHECK: store i64 0, i64* [[SIZE1]], // CHECK: [[BPTR2:%.+]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[BPTRS]], i32 0, i32 2 // CHECK: [[BPTR2_PTR_ADDR:%.+]] = bitcast i8** [[BPTR2]] to i32*** // CHECK: store i32** [[PTR_ADDR]], i32*** [[BPTR2_PTR_ADDR]], // CHECK: [[PTR2:%.+]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[PTRS]], i32 0, i32 2 // CHECK: [[PTR2_PTR_ADDR:%.+]] = bitcast i8** [[PTR2]] to i32*** // CHECK: store i32** [[PTR_ADDR]], i32*** [[PTR2_PTR_ADDR]], -// CHECK: [[SIZE2:%.+]] = getelementptr inbounds [5 x i64], [5 x i64]* [[SIZES]], i32 0, i32 2 -// CHECK: store i64 0, i64* [[SIZE2]], // CHECK: [[BPTR3:%.+]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[BPTRS]], i32 0, i32 3 // CHECK: [[BPTR3_REF_PTR:%.+]] = bitcast i8** [[BPTR3]] to i32** // CHECK: store i32* [[REF_PTR]], i32** [[BPTR3_REF_PTR]], // CHECK: [[PTR3:%.+]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[PTRS]], i32 0, i32 3 // CHECK: [[PTR3_REF_PTR:%.+]] = bitcast i8** [[PTR3]] to i32** // CHECK: store i32* [[REF_PTR]], i32** [[PTR3_REF_PTR]], -// CHECK: [[SIZE3:%.+]] = getelementptr inbounds [5 x i64], [5 x i64]* [[SIZES]], i32 0, i32 3 -// CHECK: store i64 0, i64* [[SIZE3]], // CHECK: [[BPTR4:%.+]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[BPTRS]], i32 0, i32 4 // CHECK: [[BPTR4_ARR_ADDR:%.+]] = bitcast i8** [[BPTR4]] to [4 x i32]** // CHECK: store [4 x i32]* [[ARR_ADDR2]], [4 x i32]** [[BPTR4_ARR_ADDR]], // CHECK: [[PTR4:%.+]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[PTRS]], i32 0, i32 4 // CHECK: [[PTR4_ARR_ADDR:%.+]] = bitcast i8** [[PTR4]] to [4 x i32]** // CHECK: store [4 x i32]* [[ARR_ADDR2]], [4 x i32]** [[PTR4_ARR_ADDR]], -// CHECK: [[SIZE4:%.+]] = getelementptr inbounds [5 x i64], [5 x i64]* [[SIZES]], i32 0, i32 4 -// CHECK: store i64 0, i64* [[SIZE4]], // CHECK: [[BPTR:%.+]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[BPTRS]], i32 0, i32 0 // CHECK: [[PTR:%.+]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[PTRS]], i32 0, i32 0 -// CHECK: [[SIZE:%.+]] = getelementptr inbounds [5 x i64], [5 x i64]* [[SIZES]], i32 0, i32 0 -// CHECK: call void @__tgt_target_data_begin_mapper(%struct.ident_t* @{{.+}}, i64 -1, i32 5, i8** [[BPTR]], i8** [[PTR]], i64* [[SIZE]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* [[MAPTYPES2]], i32 0, i32 0), i8** null, i8** null) +// CHECK: call void @__tgt_target_data_begin_mapper(%struct.ident_t* @{{.+}}, i64 -1, i32 5, i8** [[BPTR]], i8** [[PTR]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* [[SIZES2]], i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* [[MAPTYPES2]], i32 0, i32 0), i8** null, i8** null) // CHECK: [[A_ADDR:%.+]] = load i32*, i32** [[BPTR1_A_ADDR]], // CHECK: store i32* [[A_ADDR]], i32** [[A_REF:%.+]], // CHECK: [[PTR_ADDR:%.+]] = load i32**, i32*** [[BPTR2_PTR_ADDR]], @@ -218,7 +208,6 @@ // CHECK: store i32 [[INC]], i32* [[ARR0_ADDR]], // CHECK: [[BPTR:%.+]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[BPTRS]], i32 0, i32 0 // CHECK: [[PTR:%.+]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[PTRS]], i32 0, i32 0 -// CHECK: [[SIZE:%.+]] = getelementptr inbounds [5 x i64], [5 x i64]* [[SIZES]], i32 0, i32 0 -// CHECK: call void @__tgt_target_data_end_mapper(%struct.ident_t* @{{.+}}, i64 -1, i32 5, i8** [[BPTR]], i8** [[PTR]], i64* [[SIZE]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* [[MAPTYPES2]], i32 0, i32 0), i8** null, i8** null) +// CHECK: call void 
@__tgt_target_data_end_mapper(%struct.ident_t* @{{.+}}, i64 -1, i32 5, i8** [[BPTR]], i8** [[PTR]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* [[SIZES2]], i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* [[MAPTYPES2]], i32 0, i32 0), i8** null, i8** null) #endif diff --git a/clang/test/OpenMP/target_defaultmap_codegen_01.cpp b/clang/test/OpenMP/target_defaultmap_codegen_01.cpp --- a/clang/test/OpenMP/target_defaultmap_codegen_01.cpp +++ b/clang/test/OpenMP/target_defaultmap_codegen_01.cpp @@ -739,7 +739,7 @@ // CK15-LABEL: @.__omp_offloading_{{.*}}implicit_maps_variable_length_array{{.*}}_l{{[0-9]+}}.region_id = weak{{.*}} constant i8 0 -// We don't have a constant map size for VLAs. +// CK15-DAG: [[SIZES:@.+]] = {{.+}}global [3 x i64] [i64 {{4|8}}, i64 {{4|8}}, i64 0] // Map types: // - OMP_MAP_LITERAL + OMP_MAP_TARGET_PARAM + OMP_MAP_IMPLICIT = 800 (vla size) // - OMP_MAP_LITERAL + OMP_MAP_TARGET_PARAM + OMP_MAP_IMPLICIT = 800 (vla size) @@ -750,37 +750,31 @@ void implicit_maps_variable_length_array (int a){ double vla[2][a]; - // CK15-DAG: call i32 @__tgt_target_mapper(%struct.ident_t* @{{.+}}, i64 {{.+}}, i8* {{.+}}, i32 3, i8** [[BPGEP:%[0-9]+]], i8** [[PGEP:%[0-9]+]], i64* [[SGEP:%[^,]+]], {{.+}}[[TYPES]]{{.+}}, i8** null, i8** null) + // CK15-DAG: call i32 @__tgt_target_mapper(%struct.ident_t* @{{.+}}, i64 {{.+}}, i8* {{.+}}, i32 3, i8** [[BPGEP:%[0-9]+]], i8** [[PGEP:%[0-9]+]], {{.+}}[[SIZES]]{{.+}}, {{.+}}[[TYPES]]{{.+}}, i8** null, i8** null) // CK15-DAG: [[BPGEP]] = getelementptr inbounds {{.+}}[[BPS:%[^,]+]], i32 0, i32 0 // CK15-DAG: [[PGEP]] = getelementptr inbounds {{.+}}[[PS:%[^,]+]], i32 0, i32 0 - // CK15-DAG: [[SGEP]] = getelementptr inbounds {{.+}}[[SS:%[^,]+]], i32 0, i32 0 // CK15-DAG: [[BP0:%.+]] = getelementptr inbounds {{.+}}[[BPS]], i32 0, i32 0 // CK15-DAG: [[P0:%.+]] = getelementptr inbounds {{.+}}[[PS]], i32 0, i32 0 - // CK15-DAG: [[S0:%.+]] = getelementptr inbounds {{.+}}[[SS]], i32 0, i32 0 // CK15-DAG: [[CBP0:%.+]] = bitcast i8** [[BP0]] to i[[sz:64|32]]* // CK15-DAG: [[CP0:%.+]] = bitcast i8** [[P0]] to i[[sz]]* // CK15-DAG: store i[[sz]] 2, i[[sz]]* [[CBP0]] // CK15-DAG: store i[[sz]] 2, i[[sz]]* [[CP0]] - // CK15-DAG: store i64 {{8|4}}, i64* [[S0]], // CK15-DAG: [[BP1:%.+]] = getelementptr inbounds {{.+}}[[BPS]], i32 0, i32 1 // CK15-DAG: [[P1:%.+]] = getelementptr inbounds {{.+}}[[PS]], i32 0, i32 1 - // CK15-DAG: [[S1:%.+]] = getelementptr inbounds {{.+}}[[SS]], i32 0, i32 1 // CK15-DAG: [[CBP1:%.+]] = bitcast i8** [[BP1]] to i[[sz]]* // CK15-DAG: [[CP1:%.+]] = bitcast i8** [[P1]] to i[[sz]]* // CK15-DAG: store i[[sz]] [[VAL:%.+]], i[[sz]]* [[CBP1]] // CK15-DAG: store i[[sz]] [[VAL]], i[[sz]]* [[CP1]] - // CK15-DAG: store i64 {{8|4}}, i64* [[S1]], // CK15-DAG: [[BP2:%.+]] = getelementptr inbounds {{.+}}[[BPS]], i32 0, i32 2 // CK15-DAG: [[P2:%.+]] = getelementptr inbounds {{.+}}[[PS]], i32 0, i32 2 - // CK15-DAG: [[S2:%.+]] = getelementptr inbounds {{.+}}[[SS]], i32 0, i32 2 // CK15-DAG: [[CBP2:%.+]] = bitcast i8** [[BP2]] to double** // CK15-DAG: [[CP2:%.+]] = bitcast i8** [[P2]] to double** // CK15-DAG: store double* [[DECL:%.+]], double** [[CBP2]] // CK15-DAG: store double* [[DECL]], double** [[CP2]] - // CK15-DAG: store i64 [[VALS2:%.+]], i64* [[S2]], + // CK15-DAG: store i64 [[VALS2:%.+]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* [[SIZES]], i32 0, i32 2), // CK15-DAG: [[VALS2]] = {{mul nuw i64 %.+, 8|sext i32 %.+ to i64}} // CK15: call void [[KERNEL:@.+]](i[[sz]] {{.+}}, i[[sz]] {{.+}}, double* [[DECL]]) diff 
--git a/clang/test/OpenMP/target_enter_data_codegen.cpp b/clang/test/OpenMP/target_enter_data_codegen.cpp --- a/clang/test/OpenMP/target_enter_data_codegen.cpp +++ b/clang/test/OpenMP/target_enter_data_codegen.cpp @@ -44,7 +44,7 @@ // CK1: [[MTYPE03:@.+]] = {{.+}}constant [1 x i64] [i64 5] -// CK1: [[SIZE04:@.+]] = {{.+}}constant [2 x i64] [i64 sdiv exact (i64 sub (i64 ptrtoint (double** getelementptr (double*, double** getelementptr inbounds (%struct.ST, %struct.ST* @gb, i32 0, i32 1), i32 1) to i64), i64 ptrtoint (double** getelementptr inbounds (%struct.ST, %struct.ST* @gb, i32 0, i32 1) to i64)), i64 ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)), i64 24] +// CK1: [[SIZE04:@.+]] = {{.+}}global [2 x i64] [i64 0, i64 24] // CK1: [[MTYPE04:@.+]] = {{.+}}constant [2 x i64] [i64 0, i64 281474976710673] // CK1: [[MTYPE05:@.+]] = {{.+}}constant [1 x i64] [i64 1025] @@ -156,6 +156,7 @@ // CK1-DAG: [[CP0:%.+]] = bitcast i8** [[P0]] to double*** // CK1-DAG: store [[ST]]* @gb, [[ST]]** [[CBP0]] // CK1-DAG: store double** getelementptr inbounds ([[ST]], [[ST]]* @gb, i32 0, i32 1), double*** [[CP0]] + // CK1-DAG: store i64 sdiv exact (i64 sub (i64 ptrtoint (double** getelementptr (double*, double** getelementptr inbounds (%struct.ST, %struct.ST* @gb, i32 0, i32 1), i32 1) to i64), i64 ptrtoint (double** getelementptr inbounds (%struct.ST, %struct.ST* @gb, i32 0, i32 1) to i64)), i64 ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)), i64* getelementptr inbounds ([2 x i64], [2 x i64]* [[SIZE04]], i32 0, i32 0), // CK1-DAG: [[BP1:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 1 @@ -355,6 +356,7 @@ } }; +// CK2: [[SIZES:@.+]] = {{.+}}global [2 x i64] [i64 0, i64 24] // CK2: [[MTYPE00:@.+]] = {{.+}}constant [2 x i64] [i64 0, i64 281474976710677] // CK2-LABEL: _Z3bari @@ -366,21 +368,19 @@ // Region 00 // CK2: br i1 %{{[^,]+}}, label %[[IFTHEN:[^,]+]], label %[[IFELSE:[^,]+]] // CK2: [[IFTHEN]] -// CK2-DAG: call void @__tgt_target_data_begin_mapper(%struct.ident_t* @{{.+}}, i64 [[DEV:%[^,]+]], i32 2, i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], i[[sz:64|32]]* [[GEPS:%.+]], {{.+}}getelementptr {{.+}}[2 x i{{.+}}]* [[MTYPE00]]{{.+}}, i8** null) +// CK2-DAG: call void @__tgt_target_data_begin_mapper(%struct.ident_t* @{{.+}}, i64 [[DEV:%[^,]+]], i32 2, i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], {{.+}}getelementptr {{.+}}[2 x i{{.+}}]* [[SIZES]]{{.+}}, {{.+}}getelementptr {{.+}}[2 x i{{.+}}]* [[MTYPE00]]{{.+}}, i8** null) // CK2-DAG: [[DEV]] = sext i32 [[DEVi32:%[^,]+]] to i64 // CK2-DAG: [[DEVi32]] = load i32, i32* %{{[^,]+}}, // CK2-DAG: [[GEPBP]] = getelementptr inbounds {{.+}}[[BP:%[^,]+]] // CK2-DAG: [[GEPP]] = getelementptr inbounds {{.+}}[[P:%[^,]+]] -// CK2-DAG: [[GEPS]] = getelementptr inbounds {{.+}}[[S:%[^,]+]] // CK2-DAG: [[BP0:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 0 // CK2-DAG: [[P0:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 0 -// CK2-DAG: [[S0:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 0 // CK2-DAG: [[CBP0:%.+]] = bitcast i8** [[BP0]] to [[ST]]** // CK2-DAG: [[CP0:%.+]] = bitcast i8** [[P0]] to double*** // CK2-DAG: store [[ST]]* [[VAR0:%.+]], [[ST]]** [[CBP0]] // CK2-DAG: store double** [[SEC0:%.+]], double*** [[CP0]] -// CK2-DAG: store i64 {{%.+}}, i64* [[S0]] +// CK2-DAG: store i64 {{%.+}}, i64* getelementptr inbounds ([2 x i64], [2 x i64]* [[SIZES]], i32 0, i32 0), // CK2-DAG: [[SEC0]] = getelementptr inbounds {{.*}}[[ST]]* [[VAR0]], i32 0, i32 1 // CK2-DAG: [[BP1:%.+]] = getelementptr inbounds 
{{.+}}[[BP]], i{{.+}} 0, i{{.+}} 1 @@ -505,6 +505,7 @@ } }; +// CK5: [[SIZES:@.+]] = {{.+}}global [2 x i64] [i64 0, i64 24] // CK5: [[MTYPE00:@.+]] = {{.+}}constant [2 x i64] [i64 0, i64 281474976711701] // CK5-LABEL: _Z3bari @@ -516,21 +517,19 @@ // Region 00 // CK5: br i1 %{{[^,]+}}, label %[[IFTHEN:[^,]+]], label %[[IFELSE:[^,]+]] // CK5: [[IFTHEN]] -// CK5-DAG: call void @__tgt_target_data_begin_mapper(%struct.ident_t* @{{.+}}, i64 [[DEV:%[^,]+]], i32 2, i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], i[[sz:64|32]]* [[GEPS:%.+]], {{.+}}getelementptr {{.+}}[2 x i{{.+}}]* [[MTYPE00]]{{.+}}, i8** null) +// CK5-DAG: call void @__tgt_target_data_begin_mapper(%struct.ident_t* @{{.+}}, i64 [[DEV:%[^,]+]], i32 2, i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], {{.+}}getelementptr {{.+}}[2 x i{{.+}}]* [[SIZES]]{{.+}}, {{.+}}getelementptr {{.+}}[2 x i{{.+}}]* [[MTYPE00]]{{.+}}, i8** null) // CK5-DAG: [[DEV]] = sext i32 [[DEVi32:%[^,]+]] to i64 // CK5-DAG: [[DEVi32]] = load i32, i32* %{{[^,]+}}, // CK5-DAG: [[GEPBP]] = getelementptr inbounds {{.+}}[[BP:%[^,]+]] // CK5-DAG: [[GEPP]] = getelementptr inbounds {{.+}}[[P:%[^,]+]] -// CK5-DAG: [[GEPS]] = getelementptr inbounds {{.+}}[[S:%[^,]+]] // CK5-DAG: [[BP0:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 0 // CK5-DAG: [[P0:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 0 -// CK5-DAG: [[S0:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 0 // CK5-DAG: [[CBP0:%.+]] = bitcast i8** [[BP0]] to [[STT]]** // CK5-DAG: [[CP0:%.+]] = bitcast i8** [[P0]] to double*** // CK5-DAG: store [[STT]]* [[VAR0:%.+]], [[STT]]** [[CBP0]] // CK5-DAG: store double** [[SEC0:%.+]], double*** [[CP0]] -// CK5-DAG: store i64 {{%.+}}, i64* [[S0]] +// CK5-DAG: store i64 {{%.+}}, i64* getelementptr inbounds ([2 x i64], [2 x i64]* [[SIZES]], i32 0, i32 0), // CK5-DAG: [[SEC0]] = getelementptr inbounds {{.*}}[[STT]]* [[VAR0]], i32 0, i32 1 // CK5-DAG: [[BP1:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 1 diff --git a/clang/test/OpenMP/target_enter_data_depend_codegen.cpp b/clang/test/OpenMP/target_enter_data_depend_codegen.cpp --- a/clang/test/OpenMP/target_enter_data_depend_codegen.cpp +++ b/clang/test/OpenMP/target_enter_data_depend_codegen.cpp @@ -37,7 +37,7 @@ // CK1: [[MTYPE03:@.+]] = {{.+}}constant [1 x i64] zeroinitializer -// CK1: [[SIZE04:@.+]] = {{.+}}constant [2 x i64] [i64 sdiv exact (i64 sub (i64 ptrtoint (double** getelementptr (double*, double** getelementptr inbounds (%struct.ST, %struct.ST* @gb, i32 0, i32 1), i32 1) to i64), i64 ptrtoint (double** getelementptr inbounds (%struct.ST, %struct.ST* @gb, i32 0, i32 1) to i64)), i64 ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)), i64 24] +// CK1: [[SIZE04:@.+]] = {{.+}}global [2 x i64] [i64 0, i64 24] // CK1: [[MTYPE04:@.+]] = {{.+}}constant [2 x i64] [i64 0, i64 281474976710673] // CK1-LABEL: _Z3fooi @@ -290,6 +290,7 @@ // CK1: [[P0:%.+]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[P:%.+]], i32 0, i32 0 // CK1: [[P0_BC:%.+]] = bitcast i8** [[P0]] to double*** // CK1: store double** getelementptr inbounds (%struct.ST, %struct.ST* @gb, i32 0, i32 1), double*** [[P0_BC]], + // CK1: store i64 sdiv exact (i64 sub (i64 ptrtoint (double** getelementptr (double*, double** getelementptr inbounds (%struct.ST, %struct.ST* @gb, i32 0, i32 1), i32 1) to i64), i64 ptrtoint (double** getelementptr inbounds (%struct.ST, %struct.ST* @gb, i32 0, i32 1) to i64)), i64 ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)), i64* getelementptr inbounds ([2 x i64], [2 x 
i64]* [[SIZE04]], i32 0, i32 0), // CK1: [[BP1:%.+]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[BP]], i32 0, i32 1 // CK1: [[BP1_BC:%.+]] = bitcast i8** [[BP1]] to double*** // CK1: store double** getelementptr inbounds (%struct.ST, %struct.ST* @gb, i32 0, i32 1), double*** [[BP1_BC]], diff --git a/clang/test/OpenMP/target_exit_data_codegen.cpp b/clang/test/OpenMP/target_exit_data_codegen.cpp --- a/clang/test/OpenMP/target_exit_data_codegen.cpp +++ b/clang/test/OpenMP/target_exit_data_codegen.cpp @@ -43,7 +43,7 @@ // CK1: [[MTYPE03:@.+]] = {{.+}}constant [1 x i64] [i64 6] -// CK1: [[SIZE04:@.+]] = {{.+}}constant [2 x i64] [i64 sdiv exact (i64 sub (i64 ptrtoint (double** getelementptr (double*, double** getelementptr inbounds (%struct.ST, %struct.ST* @gb, i32 0, i32 1), i32 1) to i64), i64 ptrtoint (double** getelementptr inbounds (%struct.ST, %struct.ST* @gb, i32 0, i32 1) to i64)), i64 ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)), i64 24] +// CK1: [[SIZE04:@.+]] = {{.+}}global [2 x i64] [i64 0, i64 24] // CK1: [[MTYPE04:@.+]] = {{.+}}constant [2 x i64] [i64 0, i64 281474976710672] // CK1: [[MTYPE05:@.+]] = {{.+}}constant [1 x i64] [i64 1026] @@ -155,6 +155,7 @@ // CK1-DAG: [[CP0:%.+]] = bitcast i8** [[P0]] to double*** // CK1-DAG: store [[ST]]* @gb, [[ST]]** [[CBP0]] // CK1-DAG: store double** getelementptr inbounds ([[ST]], [[ST]]* @gb, i32 0, i32 1), double*** [[CP0]] + // CK1-DAG: store i64 sdiv exact (i64 sub (i64 ptrtoint (double** getelementptr (double*, double** getelementptr inbounds (%struct.ST, %struct.ST* @gb, i32 0, i32 1), i32 1) to i64), i64 ptrtoint (double** getelementptr inbounds (%struct.ST, %struct.ST* @gb, i32 0, i32 1) to i64)), i64 ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)), i64* getelementptr inbounds ([2 x i64], [2 x i64]* [[SIZE04]], i32 0, i32 0), // CK1-DAG: [[BP1:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 1 @@ -266,6 +267,7 @@ } }; +// CK2: [[SIZES:@.+]] = {{.+}}global [2 x i64] [i64 0, i64 24] // CK2: [[MTYPE00:@.+]] = {{.+}}constant [2 x i64] [i64 0, i64 281474976710676] // CK2-LABEL: _Z3bari @@ -278,21 +280,19 @@ // CK2-NOT: __tgt_target_data_begin // CK2: br i1 %{{[^,]+}}, label %[[IFTHEN:[^,]+]], label %[[IFELSE:[^,]+]] // CK2: [[IFTHEN]] -// CK2-DAG: call void @__tgt_target_data_end_mapper(%struct.ident_t* @{{.+}}, i64 [[DEV:%[^,]+]], i32 2, i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], i[[sz:.+]]* [[GEPS:%.+]], {{.+}}getelementptr {{.+}}[2 x i{{.+}}]* [[MTYPE00]]{{.+}}, i8** null) +// CK2-DAG: call void @__tgt_target_data_end_mapper(%struct.ident_t* @{{.+}}, i64 [[DEV:%[^,]+]], i32 2, i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], {{.+}}getelementptr {{.+}}[2 x i{{.+}}]* [[SIZES]]{{.+}}, {{.+}}getelementptr {{.+}}[2 x i{{.+}}]* [[MTYPE00]]{{.+}}, i8** null) // CK2-DAG: [[DEV]] = sext i32 [[DEVi32:%[^,]+]] to i64 // CK2-DAG: [[DEVi32]] = load i32, i32* %{{[^,]+}}, // CK2-DAG: [[GEPBP]] = getelementptr inbounds {{.+}}[[BP:%[^,]+]] // CK2-DAG: [[GEPP]] = getelementptr inbounds {{.+}}[[P:%[^,]+]] -// CK2-DAG: [[GEPS]] = getelementptr inbounds {{.+}}[[S:%[^,]+]] // CK2-DAG: [[BP0:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 0 // CK2-DAG: [[P0:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 0 -// CK2-DAG: [[S0:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 0 // CK2-DAG: [[CBP0:%.+]] = bitcast i8** [[BP0]] to [[ST]]** // CK2-DAG: [[CP0:%.+]] = bitcast i8** [[P0]] to double*** // CK2-DAG: store [[ST]]* [[VAR0:%[^,]+]], [[ST]]** [[CBP0]] // CK2-DAG: store double** 
[[SEC0:%[^,]+]], double*** [[CP0]] -// CK2-DAG: store i64 [[CSVAL0:%[^,]+]], i64* [[S0]] +// CK2-DAG: store i64 [[CSVAL0:%[^,]+]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* [[SIZES]], i32 0, i32 0), // CK2-DAG: [[SEC0]] = getelementptr inbounds {{.*}}[[ST]]* [[VAR0]], i32 0, i32 1 // CK2-DAG: [[BP1:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 1 @@ -370,6 +370,7 @@ } }; +// CK4: [[SIZES:@.+]] = {{.+}}global [2 x i64] [i64 0, i64 24] // CK4: [[MTYPE00:@.+]] = {{.+}}constant [2 x i64] [i64 0, i64 281474976711700] // CK4-LABEL: _Z3bari @@ -382,21 +383,19 @@ // CK4-NOT: __tgt_target_data_begin // CK4: br i1 %{{[^,]+}}, label %[[IFTHEN:[^,]+]], label %[[IFELSE:[^,]+]] // CK4: [[IFTHEN]] -// CK4-DAG: call void @__tgt_target_data_end_mapper(%struct.ident_t* @{{.+}}, i64 [[DEV:%[^,]+]], i32 2, i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], i[[sz:.+]]* [[GEPS:%.+]], {{.+}}getelementptr {{.+}}[2 x i{{.+}}]* [[MTYPE00]]{{.+}}, i8** null) +// CK4-DAG: call void @__tgt_target_data_end_mapper(%struct.ident_t* @{{.+}}, i64 [[DEV:%[^,]+]], i32 2, i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], {{.+}}getelementptr {{.+}}[2 x i{{.+}}]* [[SIZES]]{{.+}}, {{.+}}getelementptr {{.+}}[2 x i{{.+}}]* [[MTYPE00]]{{.+}}, i8** null) // CK4-DAG: [[DEV]] = sext i32 [[DEVi32:%[^,]+]] to i64 // CK4-DAG: [[DEVi32]] = load i32, i32* %{{[^,]+}}, // CK4-DAG: [[GEPBP]] = getelementptr inbounds {{.+}}[[BP:%[^,]+]] // CK4-DAG: [[GEPP]] = getelementptr inbounds {{.+}}[[P:%[^,]+]] -// CK4-DAG: [[GEPS]] = getelementptr inbounds {{.+}}[[S:%[^,]+]] // CK4-DAG: [[BP0:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 0 // CK4-DAG: [[P0:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 0 -// CK4-DAG: [[S0:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 0 // CK4-DAG: [[CBP0:%.+]] = bitcast i8** [[BP0]] to [[STT]]** // CK4-DAG: [[CP0:%.+]] = bitcast i8** [[P0]] to double*** // CK4-DAG: store [[STT]]* [[VAR0:%[^,]+]], [[STT]]** [[CBP0]] // CK4-DAG: store double** [[SEC0:%[^,]+]], double*** [[CP0]] -// CK4-DAG: store i64 [[CSVAL0:%[^,]+]], i64* [[S0]] +// CK4-DAG: store i64 [[CSVAL0:%[^,]+]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* [[SIZES]], i32 0, i32 0), // CK4-DAG: [[SEC0]] = getelementptr inbounds {{.*}}[[STT]]* [[VAR0]], i32 0, i32 1 // CK4-DAG: [[BP1:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 1 diff --git a/clang/test/OpenMP/target_exit_data_depend_codegen.cpp b/clang/test/OpenMP/target_exit_data_depend_codegen.cpp --- a/clang/test/OpenMP/target_exit_data_depend_codegen.cpp +++ b/clang/test/OpenMP/target_exit_data_depend_codegen.cpp @@ -37,7 +37,7 @@ // CK1: [[MTYPE03:@.+]] = {{.+}}constant [1 x i64] [i64 2] -// CK1: [[SIZE04:@.+]] = {{.+}}constant [2 x i64] [i64 sdiv exact (i64 sub (i64 ptrtoint (double** getelementptr (double*, double** getelementptr inbounds (%struct.ST, %struct.ST* @gb, i32 0, i32 1), i32 1) to i64), i64 ptrtoint (double** getelementptr inbounds (%struct.ST, %struct.ST* @gb, i32 0, i32 1) to i64)), i64 ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)), i64 24] +// CK1: [[SIZE04:@.+]] = {{.+}}global [2 x i64] [i64 0, i64 24] // CK1: [[MTYPE04:@.+]] = {{.+}}constant [2 x i64] [i64 0, i64 281474976710674] // CK1-LABEL: _Z3fooi @@ -290,6 +290,7 @@ // CK1: [[P0:%.+]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[P:%.+]], i32 0, i32 0 // CK1: [[P0_BC:%.+]] = bitcast i8** [[P0]] to double*** // CK1: store double** getelementptr inbounds (%struct.ST, %struct.ST* @gb, i32 0, i32 1), double*** [[P0_BC]], + // CK1: store i64 sdiv 
exact (i64 sub (i64 ptrtoint (double** getelementptr (double*, double** getelementptr inbounds (%struct.ST, %struct.ST* @gb, i32 0, i32 1), i32 1) to i64), i64 ptrtoint (double** getelementptr inbounds (%struct.ST, %struct.ST* @gb, i32 0, i32 1) to i64)), i64 ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)), i64* getelementptr inbounds ([2 x i64], [2 x i64]* [[SIZE04]], i32 0, i32 0), // CK1: [[BP1:%.+]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[BP]], i32 0, i32 1 // CK1: [[BP1_BC:%.+]] = bitcast i8** [[BP1]] to double*** // CK1: store double** getelementptr inbounds (%struct.ST, %struct.ST* @gb, i32 0, i32 1), double*** [[BP1_BC]], diff --git a/clang/test/OpenMP/target_firstprivate_codegen.cpp b/clang/test/OpenMP/target_firstprivate_codegen.cpp --- a/clang/test/OpenMP/target_firstprivate_codegen.cpp +++ b/clang/test/OpenMP/target_firstprivate_codegen.cpp @@ -57,9 +57,11 @@ // CHECK-DAG: [[SIZET:@.+]] = private unnamed_addr constant [3 x i{{32|64}}] [i[[SZ:32|64]] 4, i{{64|32}} {{8|4}}, i[[SZ:32|64]] 4] // CHECK-DAG: [[MAPT:@.+]] = private unnamed_addr constant [3 x i64] [i64 288, i64 49, i64 288] +// CHECK-DAG: [[SIZET2:@.+]] = private unnamed_addr global [9 x i64] [i64 2, i64 40, i64 {{4|8}}, i64 0, i64 400, i64 {{4|8}}, i64 {{4|8}}, i64 0, i64 {{12|16}}] // CHECK-DAG: [[MAPT2:@.+]] = private unnamed_addr constant [9 x i64] [i64 288, i64 161, i64 800, i64 161, i64 161, i64 800, i64 800, i64 161, i64 161] // CHECK-DAG: [[SIZET3:@.+]] = private unnamed_addr constant [2 x i{{32|64}}] [i{{32|64}} 0, i{{32|64}} 8] // CHECK-DAG: [[MAPT3:@.+]] = private unnamed_addr constant [2 x i64] [i64 32, i64 161] +// CHECK-DAG: [[SIZET4:@.+]] = private unnamed_addr global [5 x i64] [i64 8, i64 4, i64 {{4|8}}, i64 {{4|8}}, i64 0] // CHECK-DAG: [[MAPT4:@.+]] = private unnamed_addr constant [5 x i64] [i64 547, i64 288, i64 800, i64 800, i64 161] // CHECK-DAG: [[SIZET5:@.+]] = private unnamed_addr constant [3 x i{{32|64}}] [i[[SZ]] 4, i[[SZ]] 1, i[[SZ]] 40] // CHECK-DAG: [[MAPT5:@.+]] = private unnamed_addr constant [3 x i64] [i64 288, i64 288, i64 161] @@ -99,7 +101,6 @@ // CHECK: [[A2CAST:%.+]] = alloca i{{[0-9]+}}, // CHECK: [[BASE_PTR_ARR2:%.+]] = alloca [9 x i8*], // CHECK: [[PTR_ARR2:%.+]] = alloca [9 x i8*], - // CHECK: [[SIZET2:%.+]] = alloca [9 x i{{[0-9]+}}], // CHECK: [[BASE_PTR_ARR3:%.+]] = alloca [2 x i8*], // CHECK: [[PTR_ARR3:%.+]] = alloca [2 x i8*], // CHECK: [[N_ADDR_VAL:%.+]] = load i{{[0-9]+}}, i{{[0-9]+}}* [[N_ADDR]], @@ -184,8 +185,6 @@ // CHECK: [[PTR_GEP2_0:%.+]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[PTR_ARR2]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 // CHECK: [[ACAST_TOPTR:%.+]] = bitcast i8** [[PTR_GEP2_0]] to i{{[0-9]+}}* // CHECK: store i{{[0-9]+}} [[A2CAST_VAL]], i{{[0-9]+}}* [[ACAST_TOPTR]], - // CHECK: [[SIZE_GEPA2:%.+]] = getelementptr inbounds [9 x i{{[0-9]+}}], [9 x i{{[0-9]+}}]* [[SIZET2]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 - // CHECK: store i{{[0-9]+}} 2, i{{[0-9]+}}* [[SIZE_GEPA2]], // firstprivate(b): base_ptr = &b[0], ptr = &b[0], size = 40 (sizeof(float)*10) // CHECK: [[BASE_PTR_GEP2_1:%.+]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[BASE_PTR_ARR2]], i{{[0-9]+}} 0, i{{[0-9]+}} 1 @@ -194,8 +193,6 @@ // CHECK: [[PTR_GEP2_1:%.+]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[PTR_ARR2]], i{{[0-9]+}} 0, i{{[0-9]+}} 1 // CHECK: [[BCAST_TOPTR:%.+]] = bitcast i8** [[PTR_GEP2_1]] to [10 x float]** // CHECK: store [10 x float]* [[B]], [10 x float]** [[BCAST_TOPTR]], - // CHECK: [[SIZE_GEPB:%.+]] = getelementptr inbounds [9 x i{{[0-9]+}}], [9 x i{{[0-9]+}}]* 
[[SIZET2]], i{{[0-9]+}} 0, i{{[0-9]+}} 1 - // CHECK: store i{{[0-9]+}} 40, i{{[0-9]+}}* [[SIZE_GEPB]], // firstprivate(bn), 2 entries, n and bn: (1) base_ptr = n, ptr = n, size = 8 ; (2) base_ptr = &c[0], ptr = &c[0], size = n*sizeof(float) // CHECK: [[BASE_PTR_GEP2_2:%.+]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[BASE_PTR_ARR2]], i{{[0-9]+}} 0, i{{[0-9]+}} 2 @@ -206,13 +203,10 @@ // CHECK: [[BCAST_TOPTR:%.+]] = bitcast i8** [[PTR_GEP2_2]] to i{{[0-9]+}}* // CHECK-64: store i{{[0-9]+}} [[N_EXT]], i{{[0-9]+}}* [[BCAST_TOPTR]], // CHECK-32: store i{{[0-9]+}} [[N_ADDR_VAL]], i{{[0-9]+}}* [[BCAST_TOPTR]], - // CHECK: [[SIZE_GEPBN_1:%.+]] = getelementptr inbounds [9 x i{{[0-9]+}}], [9 x i{{[0-9]+}}]* [[SIZET2]], i{{[0-9]+}} 0, i{{[0-9]+}} 2 - // CHECK: store i{{[0-9]+}} {{[0-9]}}, i{{[0-9]+}}* [[SIZE_GEPBN_1]], // CHECK: [[BASE_PTR_GEP2_3:%.+]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[BASE_PTR_ARR2]], i{{[0-9]+}} 0, i{{[0-9]+}} 3 // CHECK: [[BCAST_TOPTR:%.+]] = bitcast i8** [[BASE_PTR_GEP2_3]] to float** // CHECK: store float* [[BN_VLA]], float** [[BCAST_TOPTR]], - // CHECK: [[SIZE_GEPBN_3:%.+]] = getelementptr inbounds [9 x i{{[0-9]+}}], [9 x i{{[0-9]+}}]* [[SIZET2]], i{{[0-9]+}} 0, i{{[0-9]+}} 3 - // CHECK: store i{{[0-9]+}} [[BN_SIZE]], i{{[0-9]+}}* [[SIZE_GEPBN_3]] + // CHECK: store i{{[0-9]+}} [[BN_SIZE]], i{{[0-9]+}}* getelementptr inbounds ([9 x i64], [9 x i64]* [[SIZET2]], i32 0, i32 3), // firstprivate(c): base_ptr = &c[0], ptr = &c[0], size = 400 (5*10*sizeof(double)) // CHECK: [[BASE_PTR_GEP2_4:%.+]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[BASE_PTR_ARR2]], i{{[0-9]+}} 0, i{{[0-9]+}} 4 @@ -221,8 +215,6 @@ // CHECK: [[PTR_GEP2_4:%.+]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[PTR_ARR2]], i{{[0-9]+}} 0, i{{[0-9]+}} 4 // CHECK: [[BCAST_TOPTR:%.+]] = bitcast i8** [[PTR_GEP2_4]] to [5 x [10 x double]]** // CHECK: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[BCAST_TOPTR]], - // CHECK: [[SIZE_GEPC_4:%.+]] = getelementptr inbounds [9 x i{{[0-9]+}}], [9 x i{{[0-9]+}}]* [[SIZET2]], i{{[0-9]+}} 0, i{{[0-9]+}} 4 - // CHECK: store i{{[0-9]+}} 400, i{{[0-9]+}}* [[SIZE_GEPC_4]], // firstprivate(cn), 3 entries, 5, n, cn: (1) base_ptr = 5, ptr = 5, size = 8; (2) (1) base_ptr = n, ptr = n, size = 8; (3) base_ptr = &cn[0], ptr = &cn[0], size = 5*n*sizeof(double) // CHECK: [[BASE_PTR_GEP2_5:%.+]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[BASE_PTR_ARR2]], i{{[0-9]+}} 0, i{{[0-9]+}} 5 @@ -231,8 +223,6 @@ // CHECK: [[PTR_GEP2_5:%.+]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[PTR_ARR2]], i{{[0-9]+}} 0, i{{[0-9]+}} 5 // CHECK: [[BCAST_TOPTR:%.+]] = bitcast i8** [[PTR_GEP2_5]] to i{{[0-9]+}}* // CHECK: store i{{[0-9]+}} 5, i{{[0-9]+}}* [[BCAST_TOPTR]], - // CHECK: [[SIZE_GEPCN_5:%.+]] = getelementptr inbounds [9 x i{{[0-9]+}}], [9 x i{{[0-9]+}}]* [[SIZET2]], i{{[0-9]+}} 0, i{{[0-9]+}} 5 - // CHECK: store i{{[0-9]+}} {{[0-9]}}, i{{[0-9]+}}* [[SIZE_GEPCN_5]], // CHECK: [[BASE_PTR_GEP2_6:%.+]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[BASE_PTR_ARR2]], i{{[0-9]+}} 0, i{{[0-9]+}} 6 // CHECK: [[BCAST_TOPTR:%.+]] = bitcast i8** [[BASE_PTR_GEP2_6]] to i{{[0-9]+}}* // CHECK-64: store i{{[0-9]+}} [[N_EXT2]], i{{[0-9]+}}* [[BCAST_TOPTR]], @@ -241,16 +231,13 @@ // CHECK: [[BCAST_TOPTR:%.+]] = bitcast i8** [[PTR_GEP2_6]] to i{{[0-9]+}}* // CHECK-64: store i{{[0-9]+}} [[N_EXT2]], i{{[0-9]+}}* [[BCAST_TOPTR]], // CHECK-32: store i{{[0-9]+}} [[N_ADDR_VAL2]], i{{[0-9]+}}* [[BCAST_TOPTR]], - // CHECK: [[SIZE_GEPCN_6:%.+]] = getelementptr inbounds [9 x 
i{{[0-9]+}}], [9 x i{{[0-9]+}}]* [[SIZET2]], i{{[0-9]+}} 0, i{{[0-9]+}} 6 - // CHECK: store i{{[0-9]+}} {{[0-9]}}, i{{[0-9]+}}* [[SIZE_GEPCN_6]], // CHECK: [[BASE_PTR_GEP2_7:%.+]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[BASE_PTR_ARR2]], i{{[0-9]+}} 0, i{{[0-9]+}} 7 // CHECK: [[BCAST_TOPTR:%.+]] = bitcast i8** [[BASE_PTR_GEP2_7]] to double** // CHECK: store double* [[CN_VLA]], double** [[BCAST_TOPTR]], // CHECK: [[PTR_GEP2_7:%.+]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[PTR_ARR2]], i{{[0-9]+}} 0, i{{[0-9]+}} 7 // CHECK: [[BCAST_TOPTR:%.+]] = bitcast i8** [[PTR_GEP2_7]] to double** // CHECK: store double* [[CN_VLA]], double** [[BCAST_TOPTR]], - // CHECK: [[SIZE_GEPCN_7:%.+]] = getelementptr inbounds [9 x i{{[0-9]+}}], [9 x i{{[0-9]+}}]* [[SIZET2]], i{{[0-9]+}} 0, i{{[0-9]+}} 7 - // CHECK: store i{{[0-9]+}} [[CN_SIZE_2]], i{{[0-9]+}}* [[SIZE_GEPCN_7]], + // CHECK: store i{{[0-9]+}} [[CN_SIZE_2]], i{{[0-9]+}}* getelementptr inbounds ([9 x i64], [9 x i64]* [[SIZET2]], i32 0, i32 7), // firstprivate(d): base_ptr = &d, ptr = &d, size = 16 // CHECK: [[BASE_PTR_GEP2_8:%.+]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[BASE_PTR_ARR2]], i{{[0-9]+}} 0, i{{[0-9]+}} 8 @@ -259,13 +246,10 @@ // CHECK: [[PTR_GEP2_8:%.+]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[PTR_ARR2]], i{{[0-9]+}} 0, i{{[0-9]+}} 8 // CHECK: [[BCAST_TOPTR:%.+]] = bitcast i8** [[PTR_GEP2_8]] to [[TT]]** // CHECK: store [[TT]]* [[D]], [[TT]]** [[BCAST_TOPTR]], - // CHECK: [[SIZE_GEPCN_8:%.+]] = getelementptr inbounds [9 x i{{[0-9]+}}], [9 x i{{[0-9]+}}]* [[SIZET2]], i{{[0-9]+}} 0, i{{[0-9]+}} 8 - // CHECK: store i{{[0-9]+}} {{[0-9]+}}, i{{[0-9]+}}* [[SIZE_GEPCN_8]], // CHECK: [[BASE_PTR_GEP_ARG2:%.+]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[BASE_PTR_ARR2]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 // CHECK: [[PTR_GEP_ARG2:%.+]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[PTR_ARR2]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 - // CHECK: [[SIZES_ARG2:%.+]] = getelementptr inbounds [9 x i[[SZ]]], [9 x i[[SZ]]]* [[SIZET2]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 - // CHECK: {{.+}} = call i32 @__tgt_target_mapper(%struct.ident_t* @{{.+}}, i64 -1, {{.+}}, i32 9, i8** [[BASE_PTR_GEP_ARG2]], i8** [[PTR_GEP_ARG2]], i[[SZ]]* [[SIZES_ARG2]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* [[MAPT2]], i32 0, i32 0), i8** null, i8** null) + // CHECK: {{.+}} = call i32 @__tgt_target_mapper(%struct.ident_t* @{{.+}}, i64 -1, {{.+}}, i32 9, i8** [[BASE_PTR_GEP_ARG2]], i8** [[PTR_GEP_ARG2]], i[[SZ]]* getelementptr inbounds ([9 x i64], [9 x i64]* [[SIZET2]], i32 0, i32 0), i64* getelementptr inbounds ([9 x i64], [9 x i64]* [[MAPT2]], i32 0, i32 0), i8** null, i8** null) // make sure that firstprivate variables are generated in all cases and that we use those instances for operations inside the // target region @@ -448,7 +432,6 @@ // CHECK: define{{.+}} i32 {{.+}}([[S1]]* {{.+}}, i{{[0-9]+}} {{.+}}) // CHECK: [[BASE_PTRS4:%.+]] = alloca [5 x i8*], // CHECK: [[PTRS4:%.+]] = alloca [5 x i8*], - // CHECK: [[SIZET4:%.+]] = alloca [5 x i{{[0-9]+}}], // map(this: this ptr is implicitly captured (not firstprivate matter) // CHECK: [[BP0:%.+]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[BASE_PTRS4]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 @@ -457,8 +440,6 @@ // CHECK: [[P0:%.+]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[PTRS4]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 // CHECK: [[CP0:%.+]] = bitcast i8** [[P0]] to double** // CHECK: store double* [[A:%.+]], double** [[CP0]], - // CHECK: [[SZ0:%.+]] = getelementptr inbounds [5 x i{{[0-9]+}}], [5 x i{{[0-9]+}}]* 
[[SIZET4]], i{{[0-9]+}} 0, i{{[0-9]+}} 0 - // CHECK: store i{{[0-9]+}} 8, i{{[0-9]+}}* [[SZ0]], // firstprivate(b): base_ptr = b, ptr = b, size = 4 (pass by-value) // CHECK: [[BASE_PTRS_GEP4_1:%.+]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[BASE_PTRS4]], i{{[0-9]+}} 0, i{{[0-9]+}} 1 @@ -467,8 +448,6 @@ // CHECK: [[PTRS_GEP4_1:%.+]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[PTRS4]], i{{[0-9]+}} 0, i{{[0-9]+}} 1 // CHECK: [[BCAST_TOPTR:%.+]] = bitcast i8** [[PTRS_GEP4_1]] to i{{[0-9]+}}* // CHECK: store i{{[0-9]+}} [[B_CAST]], i{{[0-9]+}}* [[BCAST_TOPTR]], - // CHECK: [[SIZES_GEP4_1:%.+]] = getelementptr inbounds [5 x i{{[0-9]+}}], [5 x i{{[0-9]+}}]* [[SIZET4]], i{{[0-9]+}} 0, i{{[0-9]+}} 1 - // CHECK: store i{{[0-9]+}} 4, i{{[0-9]+}}* [[SIZES_GEP4_1]], // firstprivate(c), 3 entries: 2, n, c // CHECK: [[BASE_PTRS_GEP4_2:%.+]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[BASE_PTRS4]], i{{[0-9]+}} 0, i{{[0-9]+}} 2 @@ -477,29 +456,22 @@ // CHECK: [[PTRS_GEP4_2:%.+]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[PTRS4]], i{{[0-9]+}} 0, i{{[0-9]+}} 2 // CHECK: [[BCAST_TOPTR:%.+]] = bitcast i8** [[PTRS_GEP4_2]] to i{{[0-9]+}}* // CHECK: store i{{[0-9]+}} 2, i{{[0-9]+}}* [[BCAST_TOPTR]], - // CHECK: [[SIZES_GEP4_2:%.+]] = getelementptr inbounds [5 x i{{[0-9]+}}], [5 x i{{[0-9]+}}]* [[SIZET4]], i{{[0-9]+}} 0, i{{[0-9]+}} 2 - // CHECK-64: store i{{[0-9]+}} 8, i{{[0-9]+}}* [[SIZES_GEP4_2]], - // CHECK-32: store i{{[0-9]+}} 4, i{{[0-9]+}}* [[SIZES_GEP4_2]], // CHECK: [[BASE_PTRS_GEP4_3:%.+]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[BASE_PTRS4]], i{{[0-9]+}} 0, i{{[0-9]+}} 3 // CHECK: [[BCAST_TOPTR:%.+]] = bitcast i8** [[BASE_PTRS_GEP4_3]] to i{{[0-9]+}}* // CHECK: store i{{[0-9]+}} [[N:%.+]], i{{[0-9]+}}* [[BCAST_TOPTR]], // CHECK: [[PTRS_GEP4_3:%.+]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[PTRS4]], i{{[0-9]+}} 0, i{{[0-9]+}} 3 // CHECK: [[BCAST_TOPTR:%.+]] = bitcast i8** [[PTRS_GEP4_3]] to i{{[0-9]+}}* // CHECK: store i{{[0-9]+}} [[N]], i{{[0-9]+}}* [[BCAST_TOPTR]], - // CHECK: [[SIZES_GEP4_3:%.+]] = getelementptr inbounds [5 x i{{[0-9]+}}], [5 x i{{[0-9]+}}]* [[SIZET4]], i{{[0-9]+}} 0, i{{[0-9]+}} 3 - // CHECK-64: store i{{[0-9]+}} 8, i{{[0-9]+}}* [[SIZES_GEP4_3]], - // CHECK-32: store i{{[0-9]+}} 4, i{{[0-9]+}}* [[SIZES_GEP4_3]], // CHECK: [[BASE_PTRS_GEP4_4:%.+]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[BASE_PTRS4]], i{{[0-9]+}} 0, i{{[0-9]+}} 4 // CHECK: [[BCAST_TOPTR:%.+]] = bitcast i8** [[BASE_PTRS_GEP4_4]] to i{{[0-9]+}}** // CHECK: store i{{[0-9]+}}* [[B:%.+]], i{{[0-9]+}}** [[BCAST_TOPTR]], // CHECK: [[PTRS_GEP4_4:%.+]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[PTRS4]], i{{[0-9]+}} 0, i{{[0-9]+}} 4 // CHECK: [[BCAST_TOPTR:%.+]] = bitcast i8** [[PTRS_GEP4_4]] to i{{[0-9]+}}** // CHECK: store i{{[0-9]+}}* [[B]], i{{[0-9]+}}** [[BCAST_TOPTR]], - // CHECK: [[SIZES_GEP4_4:%.+]] = getelementptr inbounds [5 x i{{[0-9]+}}], [5 x i{{[0-9]+}}]* [[SIZET4]], i{{[0-9]+}} 0, i{{[0-9]+}} 4 - // CHECK: store i{{[0-9]+}} [[B_SIZE:%.+]], i{{[0-9]+}}* [[SIZES_GEP4_4]], + // CHECK: store i{{[0-9]+}} [[B_SIZE:%.+]], i{{[0-9]+}}* getelementptr inbounds ([5 x i64], [5 x i64]* [[SIZET4]], i32 0, i32 4), // only check that we use the map types stored in the global variable - // CHECK: call i32 @__tgt_target_mapper(%struct.ident_t* @{{.+}}, i64 -1, {{.+}}, i32 5, i8** {{.+}}, i8** {{.+}}, i{{[0-9]+}}* {{.+}}, i64* getelementptr inbounds ([5 x i64], [5 x i64]* [[MAPT4]], i32 0, i32 0), i8** null, i8** null) + // CHECK: call i32 @__tgt_target_mapper(%struct.ident_t* 
@{{.+}}, i64 -1, {{.+}}, i32 5, i8** {{.+}}, i8** {{.+}}, i{{[0-9]+}}* getelementptr inbounds ([5 x i64], [5 x i64]* [[SIZET4]], i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* [[MAPT4]], i32 0, i32 0), i8** null, i8** null) // TCHECK: define weak void @__omp_offloading_{{.+}}([[S1]]* noundef [[TH:%.+]], i{{[0-9]+}} noundef [[B_IN:%.+]], i{{[0-9]+}} noundef [[VLA:%.+]], i{{[0-9]+}} noundef [[VLA1:%.+]], i{{[0-9]+}}{{.+}} [[C_IN:%.+]]) // TCHECK: [[TH_ADDR:%.+]] = alloca [[S1]]*, diff --git a/clang/test/OpenMP/target_map_codegen_12.cpp b/clang/test/OpenMP/target_map_codegen_12.cpp --- a/clang/test/OpenMP/target_map_codegen_12.cpp +++ b/clang/test/OpenMP/target_map_codegen_12.cpp @@ -35,7 +35,7 @@ // CK13-LABEL: @.__omp_offloading_{{.*}}implicit_maps_variable_length_array{{.*}}_l{{[0-9]+}}.region_id = weak constant i8 0 -// We don't have a constant map size for VLAs. +// CK13-DAG: [[SIZES:@.+]] = {{.+}}global [3 x i64] [i64 {{8|4}}, i64 {{8|4}}, i64 0] // Map types: // - OMP_MAP_PRIVATE_VAL + OMP_MAP_TARGET_PARAM + OMP_MAP_IMPLICIT = 800 (vla size) // - OMP_MAP_PRIVATE_VAL + OMP_MAP_TARGET_PARAM + OMP_MAP_IMPLICIT = 800 (vla size) @@ -46,37 +46,31 @@ void implicit_maps_variable_length_array (int a){ double vla[2][a]; - // CK13-DAG: call i32 @__tgt_target_mapper(%struct.ident_t* @{{.+}}, i64 {{.+}}, i8* {{.+}}, i32 3, i8** [[BPGEP:%[0-9]+]], i8** [[PGEP:%[0-9]+]], i64* [[SGEP:%[^,]+]], {{.+}}[[TYPES]]{{.+}}, i8** null, i8** null) + // CK13-DAG: call i32 @__tgt_target_mapper(%struct.ident_t* @{{.+}}, i64 {{.+}}, i8* {{.+}}, i32 3, i8** [[BPGEP:%[0-9]+]], i8** [[PGEP:%[0-9]+]], {{.+}}[[SIZES]]{{.+}}, {{.+}}[[TYPES]]{{.+}}, i8** null, i8** null) // CK13-DAG: [[BPGEP]] = getelementptr inbounds {{.+}}[[BPS:%[^,]+]], i32 0, i32 0 // CK13-DAG: [[PGEP]] = getelementptr inbounds {{.+}}[[PS:%[^,]+]], i32 0, i32 0 - // CK13-DAG: [[SGEP]] = getelementptr inbounds {{.+}}[[SS:%[^,]+]], i32 0, i32 0 // CK13-DAG: [[BP0:%.+]] = getelementptr inbounds {{.+}}[[BPS]], i32 0, i32 0 // CK13-DAG: [[P0:%.+]] = getelementptr inbounds {{.+}}[[PS]], i32 0, i32 0 - // CK13-DAG: [[S0:%.+]] = getelementptr inbounds {{.+}}[[SS]], i32 0, i32 0 // CK13-DAG: [[CBP0:%.+]] = bitcast i8** [[BP0]] to i[[sz:64|32]]* // CK13-DAG: [[CP0:%.+]] = bitcast i8** [[P0]] to i[[sz]]* // CK13-DAG: store i[[sz]] 2, i[[sz]]* [[CBP0]] // CK13-DAG: store i[[sz]] 2, i[[sz]]* [[CP0]] - // CK13-DAG: store i64 {{8|4}}, i64* [[S0]], // CK13-DAG: [[BP1:%.+]] = getelementptr inbounds {{.+}}[[BPS]], i32 0, i32 1 // CK13-DAG: [[P1:%.+]] = getelementptr inbounds {{.+}}[[PS]], i32 0, i32 1 - // CK13-DAG: [[S1:%.+]] = getelementptr inbounds {{.+}}[[SS]], i32 0, i32 1 // CK13-DAG: [[CBP1:%.+]] = bitcast i8** [[BP1]] to i[[sz]]* // CK13-DAG: [[CP1:%.+]] = bitcast i8** [[P1]] to i[[sz]]* // CK13-DAG: store i[[sz]] [[VAL:%.+]], i[[sz]]* [[CBP1]] // CK13-DAG: store i[[sz]] [[VAL]], i[[sz]]* [[CP1]] - // CK13-DAG: store i64 {{8|4}}, i64* [[S1]], // CK13-DAG: [[BP2:%.+]] = getelementptr inbounds {{.+}}[[BPS]], i32 0, i32 2 // CK13-DAG: [[P2:%.+]] = getelementptr inbounds {{.+}}[[PS]], i32 0, i32 2 - // CK13-DAG: [[S2:%.+]] = getelementptr inbounds {{.+}}[[SS]], i32 0, i32 2 // CK13-DAG: [[CBP2:%.+]] = bitcast i8** [[BP2]] to double** // CK13-DAG: [[CP2:%.+]] = bitcast i8** [[P2]] to double** // CK13-DAG: store double* [[DECL:%.+]], double** [[CBP2]] // CK13-DAG: store double* [[DECL]], double** [[CP2]] - // CK13-DAG: store i64 [[VALS2:%.+]], i64* [[S2]], + // CK13-DAG: store i64 [[VALS2:%.+]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* 
[[SIZES]], i32 0, i32 2), // CK13-DAG: [[VALS2]] = {{mul nuw i64 %.+, 8|sext i32 %.+ to i64}} // CK13: call void [[KERNEL:@.+]](i[[sz]] {{.+}}, i[[sz]] {{.+}}, double* [[DECL]]) diff --git a/clang/test/OpenMP/target_map_codegen_13.cpp b/clang/test/OpenMP/target_map_codegen_13.cpp --- a/clang/test/OpenMP/target_map_codegen_13.cpp +++ b/clang/test/OpenMP/target_map_codegen_13.cpp @@ -38,6 +38,7 @@ // CK14-LABEL: @.__omp_offloading_{{.*}}foo{{.*}}_l{{[0-9]+}}.region_id = weak constant i8 0 +// CK14-DAG: [[SIZES:@.+]] = {{.+}}global [4 x i64] [i64 0, i64 4, i64 8, i64 4] // Map types: // - OMP_MAP_TARGET_PARAM = 32 // - OMP_MAP_TO + OMP_MAP_FROM | OMP_MAP_IMPLICIT | OMP_MAP_MEMBER_OF = 281474976711171 @@ -65,46 +66,38 @@ SSS sss(a, (double)a); // CK14: define {{.*}}void @{{.+}}foo{{.+}}([[ST]]* {{[^,]+}}, i32 {{[^,]+}}) - // CK14-DAG: call i32 @__tgt_target_mapper(%struct.ident_t* @{{.+}}, i64 {{.+}}, i8* {{.+}}, i32 4, i8** [[BPGEP:%[0-9]+]], i8** [[PGEP:%[0-9]+]], i64* [[SIZES:%[^,]+]], {{.+}}[[TYPES]]{{.+}}, i8** null, i8** null) + // CK14-DAG: call i32 @__tgt_target_mapper(%struct.ident_t* @{{.+}}, i64 {{.+}}, i8* {{.+}}, i32 4, i8** [[BPGEP:%[0-9]+]], i8** [[PGEP:%[0-9]+]], {{.+}}[[SIZES]]{{.+}}, {{.+}}[[TYPES]]{{.+}}, i8** null, i8** null) // CK14-DAG: [[BPGEP]] = getelementptr inbounds {{.+}}[[BPS:%[^,]+]], i32 0, i32 0 // CK14-DAG: [[PGEP]] = getelementptr inbounds {{.+}}[[PS:%[^,]+]], i32 0, i32 0 - // CK14-DAG: [[SIZES]] = getelementptr inbounds {{.+}}[[S:%[^,]+]], i32 0, i32 0 // CK14-DAG: [[BP0:%.+]] = getelementptr inbounds {{.+}}[[BPS]], i32 0, i32 0 // CK14-DAG: [[P0:%.+]] = getelementptr inbounds {{.+}}[[PS]], i32 0, i32 0 - // CK14-DAG: [[S0:%.+]] = getelementptr inbounds {{.+}}[[S]], i32 0, i32 0 // CK14-DAG: [[CBP0:%.+]] = bitcast i8** [[BP0]] to [[ST]]** // CK14-DAG: [[CP0:%.+]] = bitcast i8** [[P0]] to i32** // CK14-DAG: store [[ST]]* [[DECL:%.+]], [[ST]]** [[CBP0]] // CK14-DAG: store i32* [[A:%.+]], i32** [[CP0]] - // CK14-DAG: store i64 %{{.+}}, i64* [[S0]] + // CK14-DAG: store i64 %{{.+}}, i64* getelementptr inbounds ([4 x i64], [4 x i64]* [[SIZES]], i32 0, i32 0), // CK14-DAG: [[BP1:%.+]] = getelementptr inbounds {{.+}}[[BPS]], i32 0, i32 1 // CK14-DAG: [[P1:%.+]] = getelementptr inbounds {{.+}}[[PS]], i32 0, i32 1 - // CK14-DAG: [[S1:%.+]] = getelementptr inbounds {{.+}}[[S]], i32 0, i32 1 // CK14-DAG: [[CBP1:%.+]] = bitcast i8** [[BP1]] to [[ST]]** // CK14-DAG: [[CP1:%.+]] = bitcast i8** [[P1]] to i32** // CK14-DAG: store [[ST]]* [[DECL]], [[ST]]** [[CBP1]] // CK14-DAG: store i32* [[A]], i32** [[CP1]] - // CK14-DAG: store i64 4, i64* [[S1]] // CK14-DAG: [[BP2:%.+]] = getelementptr inbounds {{.+}}[[BPS]], i32 0, i32 2 // CK14-DAG: [[P2:%.+]] = getelementptr inbounds {{.+}}[[PS]], i32 0, i32 2 - // CK14-DAG: [[S2:%.+]] = getelementptr inbounds {{.+}}[[S]], i32 0, i32 2 // CK14-DAG: [[CBP2:%.+]] = bitcast i8** [[BP2]] to [[ST]]** // CK14-DAG: [[CP2:%.+]] = bitcast i8** [[P2]] to double** // CK14-DAG: store [[ST]]* [[DECL]], [[ST]]** [[CBP2]] // CK14-DAG: store double* %{{.+}}, double** [[CP2]] - // CK14-DAG: store i64 8, i64* [[S2]] // CK14-DAG: [[BP3:%.+]] = getelementptr inbounds {{.+}}[[BPS]], i32 0, i32 3 // CK14-DAG: [[P3:%.+]] = getelementptr inbounds {{.+}}[[PS]], i32 0, i32 3 - // CK14-DAG: [[S3:%.+]] = getelementptr inbounds {{.+}}[[S]], i32 0, i32 3 // CK14-DAG: [[CBP3:%.+]] = bitcast i8** [[BP3]] to i[[sz:64|32]]* // CK14-DAG: [[CP3:%.+]] = bitcast i8** [[P3]] to i[[sz]]* // CK14-DAG: store i[[sz]] [[VAL:%.+]], i[[sz]]* [[CBP3]] // CK14-DAG: store i[[sz]] 
[[VAL]], i[[sz]]* [[CP3]] - // CK14-DAG: store i64 4, i64* [[S3]] // CK14-DAG: [[VAL]] = load i[[sz]], i[[sz]]* [[ADDR:%.+]], // CK14-64-DAG: [[CADDR:%.+]] = bitcast i[[sz]]* [[ADDR]] to i32* // CK14-64-DAG: store i32 {{.+}}, i32* [[CADDR]], diff --git a/clang/test/OpenMP/target_map_codegen_14.cpp b/clang/test/OpenMP/target_map_codegen_14.cpp --- a/clang/test/OpenMP/target_map_codegen_14.cpp +++ b/clang/test/OpenMP/target_map_codegen_14.cpp @@ -34,12 +34,14 @@ #ifdef CK15 // CK15: [[ST:%.+]] = type { i32, double } +// CK15: [[SIZES:@.+]] = {{.+}}global [4 x i64] [i64 0, i64 4, i64 8, i64 4] // Map types: // - OMP_MAP_TARGET_PARAM = 32 // - OMP_MAP_TO + OMP_MAP_FROM | OMP_MAP_IMPLICIT | OMP_MAP_MEMBER_OF = 281474976711171 // - OMP_MAP_PRIVATE_VAL + OMP_MAP_TARGET_PARAM | OMP_MAP_IMPLICIT = 800 // CK15: [[TYPES:@.+]] = {{.+}}constant [4 x i64] [i64 32, i64 281474976711171, i64 281474976711171, i64 800] +// CK15: [[SIZES2:@.+]] = {{.+}}global [4 x i64] [i64 0, i64 4, i64 8, i64 4] // Map types: // - OMP_MAP_TARGET_PARAM = 32 // - OMP_MAP_TO + OMP_MAP_FROM | OMP_MAP_IMPLICIT | OMP_MAP_MEMBER_OF = 281474976711171 @@ -76,46 +78,38 @@ SSST<123> ssst(a, (double)a); // CK15: define {{.*}}void @{{.+}}foo{{.+}}([[ST]]* {{[^,]+}}, i32 {{[^,]+}}) - // CK15-DAG: call i32 @__tgt_target_mapper(%struct.ident_t* @{{.+}}, i64 {{.+}}, i8* {{.+}}, i32 4, i8** [[BPGEP:%[0-9]+]], i8** [[PGEP:%[0-9]+]], i64* [[SIZES:%[^,]+]], {{.+}}[[TYPES]]{{.+}}, i8** null, i8** null) + // CK15-DAG: call i32 @__tgt_target_mapper(%struct.ident_t* @{{.+}}, i64 {{.+}}, i8* {{.+}}, i32 4, i8** [[BPGEP:%[0-9]+]], i8** [[PGEP:%[0-9]+]], {{.+}}[[SIZES]]{{.+}}, {{.+}}[[TYPES]]{{.+}}, i8** null, i8** null) // CK15-DAG: [[BPGEP]] = getelementptr inbounds {{.+}}[[BPS:%[^,]+]], i32 0, i32 0 // CK15-DAG: [[PGEP]] = getelementptr inbounds {{.+}}[[PS:%[^,]+]], i32 0, i32 0 - // CK15-DAG: [[SIZES]] = getelementptr inbounds {{.+}}[[S:%[^,]+]], i32 0, i32 0 // CK15-DAG: [[BP0:%.+]] = getelementptr inbounds {{.+}}[[BPS]], i32 0, i32 0 // CK15-DAG: [[P0:%.+]] = getelementptr inbounds {{.+}}[[PS]], i32 0, i32 0 - // CK15-DAG: [[S0:%.+]] = getelementptr inbounds {{.+}}[[S]], i32 0, i32 0 // CK15-DAG: [[CBP0:%.+]] = bitcast i8** [[BP0]] to [[ST]]** // CK15-DAG: [[CP0:%.+]] = bitcast i8** [[P0]] to i32** // CK15-DAG: store [[ST]]* [[DECL:%.+]], [[ST]]** [[CBP0]] // CK15-DAG: store i32* [[A:%.+]], i32** [[CP0]] - // CK15-DAG: store i64 %{{.+}}, i64* [[S0]] + // CK15-DAG: store i64 %{{.+}}, i64* getelementptr inbounds ([4 x i64], [4 x i64]* [[SIZES]], i32 0, i32 0), // CK15-DAG: [[BP1:%.+]] = getelementptr inbounds {{.+}}[[BPS]], i32 0, i32 1 // CK15-DAG: [[P1:%.+]] = getelementptr inbounds {{.+}}[[PS]], i32 0, i32 1 - // CK15-DAG: [[S1:%.+]] = getelementptr inbounds {{.+}}[[S]], i32 0, i32 1 // CK15-DAG: [[CBP1:%.+]] = bitcast i8** [[BP1]] to [[ST]]** // CK15-DAG: [[CP1:%.+]] = bitcast i8** [[P1]] to i32** // CK15-DAG: store [[ST]]* [[DECL]], [[ST]]** [[CBP1]] // CK15-DAG: store i32* [[A]], i32** [[CP1]] - // CK15-DAG: store i64 4, i64* [[S1]] // CK15-DAG: [[BP2:%.+]] = getelementptr inbounds {{.+}}[[BPS]], i32 0, i32 2 // CK15-DAG: [[P2:%.+]] = getelementptr inbounds {{.+}}[[PS]], i32 0, i32 2 - // CK15-DAG: [[S2:%.+]] = getelementptr inbounds {{.+}}[[S]], i32 0, i32 2 // CK15-DAG: [[CBP2:%.+]] = bitcast i8** [[BP2]] to [[ST]]** // CK15-DAG: [[CP2:%.+]] = bitcast i8** [[P2]] to double** // CK15-DAG: store [[ST]]* [[DECL]], [[ST]]** [[CBP2]] // CK15-DAG: store double* %{{.+}}, double** [[CP2]] - // CK15-DAG: store i64 8, i64* [[S2]] // CK15-DAG: 
[[BP3:%.+]] = getelementptr inbounds {{.+}}[[BPS]], i32 0, i32 3 // CK15-DAG: [[P3:%.+]] = getelementptr inbounds {{.+}}[[PS]], i32 0, i32 3 - // CK15-DAG: [[S3:%.+]] = getelementptr inbounds {{.+}}[[S]], i32 0, i32 3 // CK15-DAG: [[CBP3:%.+]] = bitcast i8** [[BP3]] to i[[sz:64|32]]* // CK15-DAG: [[CP3:%.+]] = bitcast i8** [[P3]] to i[[sz]]* // CK15-DAG: store i[[sz]] [[VAL:%.+]], i[[sz]]* [[CBP3]] // CK15-DAG: store i[[sz]] [[VAL]], i[[sz]]* [[CP3]] - // CK15-DAG: store i64 4, i64* [[S3]] // CK15-DAG: [[VAL]] = load i[[sz]], i[[sz]]* [[ADDR:%.+]], // CK15-64-DAG: [[CADDR:%.+]] = bitcast i[[sz]]* [[ADDR]] to i32* // CK15-64-DAG: store i32 {{.+}}, i32* [[CADDR]], @@ -124,46 +118,38 @@ ssst.foo(456); // CK15: define {{.*}}void @{{.+}}bar{{.+}}([[ST]]* {{[^,]+}}, i32 {{[^,]+}}) - // CK15-DAG: call i32 @__tgt_target_mapper(%struct.ident_t* @{{.+}}, i64 {{.+}}, i8* {{.+}}, i32 4, i8** [[BPGEP:%[0-9]+]], i8** [[PGEP:%[0-9]+]], i64* [[SIZES:[^,]+]], {{.+}}[[TYPES2]]{{.+}}, i8** null, i8** null) + // CK15-DAG: call i32 @__tgt_target_mapper(%struct.ident_t* @{{.+}}, i64 {{.+}}, i8* {{.+}}, i32 4, i8** [[BPGEP:%[0-9]+]], i8** [[PGEP:%[0-9]+]], {{.+}}[[SIZES2]]{{.+}}, {{.+}}[[TYPES2]]{{.+}}, i8** null, i8** null) // CK15-DAG: [[BPGEP]] = getelementptr inbounds {{.+}}[[BPS:%[^,]+]], i32 0, i32 0 // CK15-DAG: [[PGEP]] = getelementptr inbounds {{.+}}[[PS:%[^,]+]], i32 0, i32 0 - // CK15-DAG: [[SIZES]] = getelementptr inbounds {{.+}}[[S:%[^,]+]], i32 0, i32 0 // CK15-DAG: [[BP0:%.+]] = getelementptr inbounds {{.+}}[[BPS]], i32 0, i32 0 // CK15-DAG: [[P0:%.+]] = getelementptr inbounds {{.+}}[[PS]], i32 0, i32 0 - // CK15-DAG: [[S0:%.+]] = getelementptr inbounds {{.+}}[[S]], i32 0, i32 0 // CK15-DAG: [[CBP0:%.+]] = bitcast i8** [[BP0]] to [[ST]]** // CK15-DAG: [[CP0:%.+]] = bitcast i8** [[P0]] to i32** // CK15-DAG: store [[ST]]* [[DECL:%.+]], [[ST]]** [[CBP0]] // CK15-DAG: store i32* [[A:%.+]], i32** [[CP0]] - // CK15-DAG: store i64 %{{.+}}, i64* [[S0]] + // CK15-DAG: store i64 %{{.+}}, i64* getelementptr inbounds ([4 x i64], [4 x i64]* [[SIZES2]], i32 0, i32 0), // CK15-DAG: [[BP1:%.+]] = getelementptr inbounds {{.+}}[[BPS]], i32 0, i32 1 // CK15-DAG: [[P1:%.+]] = getelementptr inbounds {{.+}}[[PS]], i32 0, i32 1 - // CK15-DAG: [[S1:%.+]] = getelementptr inbounds {{.+}}[[S]], i32 0, i32 1 // CK15-DAG: [[CBP1:%.+]] = bitcast i8** [[BP1]] to [[ST]]** // CK15-DAG: [[CP1:%.+]] = bitcast i8** [[P1]] to i32** // CK15-DAG: store [[ST]]* [[DECL]], [[ST]]** [[CBP1]] // CK15-DAG: store i32* [[A]], i32** [[CP1]] - // CK15-DAG: store i64 4, i64* [[S1]] // CK15-DAG: [[BP2:%.+]] = getelementptr inbounds {{.+}}[[BPS]], i32 0, i32 2 // CK15-DAG: [[P2:%.+]] = getelementptr inbounds {{.+}}[[PS]], i32 0, i32 2 - // CK15-DAG: [[S2:%.+]] = getelementptr inbounds {{.+}}[[S]], i32 0, i32 2 // CK15-DAG: [[CBP2:%.+]] = bitcast i8** [[BP2]] to [[ST]]** // CK15-DAG: [[CP2:%.+]] = bitcast i8** [[P2]] to double** // CK15-DAG: store [[ST]]* [[DECL]], [[ST]]** [[CBP2]] // CK15-DAG: store double* %{{.+}}, double** [[CP2]] - // CK15-DAG: store i64 8, i64* [[S2]] // CK15-DAG: [[BP3:%.+]] = getelementptr inbounds {{.+}}[[BPS]], i32 0, i32 3 // CK15-DAG: [[P3:%.+]] = getelementptr inbounds {{.+}}[[PS]], i32 0, i32 3 - // CK15-DAG: [[S3:%.+]] = getelementptr inbounds {{.+}}[[S]], i32 0, i32 3 // CK15-DAG: [[CBP3:%.+]] = bitcast i8** [[BP3]] to i[[sz]]* // CK15-DAG: [[CP3:%.+]] = bitcast i8** [[P3]] to i[[sz]]* // CK15-DAG: store i[[sz]] [[VAL:%.+]], i[[sz]]* [[CBP3]] // CK15-DAG: store i[[sz]] [[VAL]], i[[sz]]* [[CP3]] - // CK15-DAG: store 
i64 4, i64* [[S3]] // CK15-DAG: [[VAL]] = load i[[sz]], i[[sz]]* [[ADDR:%.+]], // CK15-64-DAG: [[CADDR:%.+]] = bitcast i[[sz]]* [[ADDR]] to i32* // CK15-64-DAG: store i32 {{.+}}, i32* [[CADDR]], diff --git a/clang/test/OpenMP/target_map_codegen_18.inc b/clang/test/OpenMP/target_map_codegen_18.inc --- a/clang/test/OpenMP/target_map_codegen_18.inc +++ b/clang/test/OpenMP/target_map_codegen_18.inc @@ -91,6 +91,7 @@ // CK19-NOUSE: [[MTYPE15:@.+]] = private {{.*}}constant [1 x i64] [i64 2] // CK19-LABEL: @.__omp_offloading_{{.*}}explicit_maps_single{{.*}}_l{{[0-9]+}}.region_id = weak constant i8 0 +// CK19-USE: [[SIZE16:@.+]] = private {{.*}}global [2 x i64] [i64 {{8|4}}, i64 0] // CK19-USE: [[MTYPE16:@.+]] = private {{.*}}constant [2 x i64] [i64 800, i64 33] // CK19-NOUSE: [[MTYPE16:@.+]] = private {{.*}}constant [1 x i64] [i64 1] @@ -107,6 +108,7 @@ // CK19-NOUSE: [[MTYPE18:@.+]] = private {{.*}}constant [1 x i64] [i64 3] // CK19-LABEL: @.__omp_offloading_{{.*}}explicit_maps_single{{.*}}_l{{[0-9]+}}.region_id = weak constant i8 0 +// CK19-USE: [[SIZE19:@.+]] = private {{.*}}global [2 x i64] [i64 {{8|4}}, i64 0] // CK19-USE: [[MTYPE19:@.+]] = private {{.*}}constant [2 x i64] [i64 800, i64 32] // CK19-NOUSE: [[MTYPE19:@.+]] = private {{.*}}constant [1 x i64] zeroinitializer @@ -117,6 +119,7 @@ // CK19-NOUSE: [[MTYPE20:@.+]] = private {{.*}}constant [1 x i64] [i64 1] // CK19-LABEL: @.__omp_offloading_{{.*}}explicit_maps_single{{.*}}_l{{[0-9]+}}.region_id = weak constant i8 0 +// CK19-USE: [[SIZE21:@.+]] = private {{.*}}global [2 x i64] [i64 {{8|4}}, i64 0] // CK19-USE: [[MTYPE21:@.+]] = private {{.*}}constant [2 x i64] [i64 800, i64 35] // CK19-NOUSE: [[MTYPE21:@.+]] = private {{.*}}constant [1 x i64] [i64 3] @@ -162,6 +165,7 @@ // CK19-NOUSE: [[MTYPE29:@.+]] = private {{.*}}constant [3 x i64] [i64 3, i64 16, i64 19] // CK19-LABEL: @.__omp_offloading_{{.*}}explicit_maps_single{{.*}}_l{{[0-9]+}}.region_id = weak constant i8 0 +// CK19-USE: [[SIZE30:@.+]] = private {{.*}}global [4 x i64] [i64 {{8|4}}, i64 {{8|4}}, i64 {{8|4}}, i64 0] // CK19-USE: [[MTYPE30:@.+]] = private {{.*}}constant [4 x i64] [i64 800, i64 800, i64 800, i64 35] // CK19-NOUSE: [[MTYPE30:@.+]] = private {{.*}}constant [1 x i64] [i64 3] @@ -196,18 +200,22 @@ // CK19-NOUSE: [[MTYPE36:@.+]] = private {{.*}}constant [1 x i64] [i64 3] // CK19-LABEL: @.__omp_offloading_{{.*}}explicit_maps_single{{.*}}_l{{[0-9]+}}.region_id = weak constant i8 0 +// CK19-USE: [[SIZE37:@.+]] = private {{.*}}global [3 x i64] [i64 {{8|4}}, i64 {{8|4}}, i64 0] // CK19-USE: [[MTYPE37:@.+]] = private {{.*}}constant [3 x i64] [i64 800, i64 800, i64 35] // CK19-NOUSE: [[MTYPE37:@.+]] = private {{.*}}constant [1 x i64] [i64 3] // CK19-LABEL: @.__omp_offloading_{{.*}}explicit_maps_single{{.*}}_l{{[0-9]+}}.region_id = weak constant i8 0 +// CK19-USE: [[SIZE38:@.+]] = private {{.*}}global [3 x i64] [i64 {{8|4}}, i64 {{8|4}}, i64 0] // CK19-USE: [[MTYPE38:@.+]] = private {{.*}}constant [3 x i64] [i64 800, i64 800, i64 35] // CK19-NOUSE: [[MTYPE38:@.+]] = private {{.*}}constant [1 x i64] [i64 3] // CK19-LABEL: @.__omp_offloading_{{.*}}explicit_maps_single{{.*}}_l{{[0-9]+}}.region_id = weak constant i8 0 +// CK19-USE: [[SIZE39:@.+]] = private {{.*}}global [3 x i64] [i64 {{8|4}}, i64 {{8|4}}, i64 0] // CK19-USE: [[MTYPE39:@.+]] = private {{.*}}constant [3 x i64] [i64 800, i64 800, i64 35] // CK19-NOUSE: [[MTYPE39:@.+]] = private {{.*}}constant [1 x i64] [i64 3] // CK19-LABEL: @.__omp_offloading_{{.*}}explicit_maps_single{{.*}}_l{{[0-9]+}}.region_id = weak constant i8 0 
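[Editorial note, not part of the patch] The CK19 updates in target_map_codegen_18.inc all follow one pattern: wherever a region mixes compile-time-constant capture sizes with a runtime-evaluated one, the checks now expect a private global sizes array (for example [2 x i64] [i64 {{8|4}}, i64 0]) instead of a stack-allocated array, and the zero slot is stored to through a getelementptr into that global before the __tgt_target_mapper call. A minimal C++ sketch of the kind of construct these regions exercise; this is illustrative only, the function name and the map clause are assumptions, and the exact clause (to, alloc, tofrom) varies from region to region in the real test source:

void sketch(int ii) {
  int va[ii];                        // VLA: byte size is ii * sizeof(int), known only at run time
#pragma omp target map(tofrom : va[0:ii])
  {
    va[0] = ii;                      // 'ii' itself is captured with a fixed 4- or 8-byte size
  }
}

The fixed-size captures account for the {{8|4}} slots baked into the global initializer; the VLA section accounts for the trailing i64 0 that is patched at run time at the call site. [End editorial note]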
+// CK19-USE: [[SIZE40:@.+]] = private {{.*}}global [3 x i64] [i64 {{8|4}}, i64 {{8|4}}, i64 0] // CK19-USE: [[MTYPE40:@.+]] = private {{.*}}constant [3 x i64] [i64 800, i64 800, i64 35] // CK19-NOUSE: [[MTYPE40:@.+]] = private {{.*}}constant [1 x i64] [i64 3] @@ -648,30 +656,28 @@ int va[ii]; // Region 16 - // CK19-DAG: call i32 @__tgt_target_mapper(%struct.ident_t* @{{.+}}, i64 {{[^,]+}}, i8* {{[^,]+}}, i32 {{1|2}}, i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], i64* [[GEPS:%.+]], {{.+}}getelementptr {{.+}}[{{1|2}} x i{{.+}}]* [[MTYPE16]]{{.+}}, i8** null, i8** null) + // CK19-USE-DAG: call i32 @__tgt_target_mapper(%struct.ident_t* @{{.+}}, i64 {{[^,]+}}, i8* {{[^,]+}}, i32 {{1|2}}, i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], {{.+}}getelementptr {{.+}}[2 x i{{.+}}]* [[SIZE16]]{{.+}}, {{.+}}getelementptr {{.+}}[2 x i{{.+}}]* [[MTYPE16]]{{.+}}, i8** null, i8** null) + // CK19-NOUSE-DAG: call i32 @__tgt_target_mapper(%struct.ident_t* @{{.+}}, i64 {{[^,]+}}, i8* {{[^,]+}}, i32 {{1|2}}, i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], i64* [[GEPS:%.+]], {{.+}}getelementptr {{.+}}[1 x i{{.+}}]* [[MTYPE16]]{{.+}}, i8** null, i8** null) // CK19-DAG: [[GEPBP]] = getelementptr inbounds {{.+}}[[BP:%[^,]+]] // CK19-DAG: [[GEPP]] = getelementptr inbounds {{.+}}[[P:%[^,]+]] - // CK19-DAG: [[GEPS]] = getelementptr inbounds {{.+}}[[S:%[^,]+]] // CK19-USE-DAG: [[BP0:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 0 // CK19-USE-DAG: [[P0:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 0 - // CK19-USE-DAG: [[S0:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 0 // CK19-USE-DAG: [[CBP0:%.+]] = bitcast i8** [[BP0]] to i[[Z:64|32]]* // CK19-USE-DAG: [[CP0:%.+]] = bitcast i8** [[P0]] to i[[Z]]* // CK19-USE-DAG: store i[[Z]] {{%.+}}, i[[Z]]* [[CBP0]] // CK19-USE-DAG: store i[[Z]] {{%.+}}, i[[Z]]* [[CP0]] - // CK19-USE-DAG: store i{{.+}} {{8|4}}, i{{.+}}* [[S0]] // CK19-USE-DAG: [[BP1:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 1 // CK19-USE-DAG: [[P1:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 1 - // CK19-USE-DAG: [[S1:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 1 // CK19-USE-DAG: [[CBP1:%.+]] = bitcast i8** [[BP1]] to i32** // CK19-USE-DAG: [[CP1:%.+]] = bitcast i8** [[P1]] to i32** // CK19-USE-DAG: store i32* [[VAR1:%.+]], i32** [[CBP1]] // CK19-USE-DAG: store i32* [[VAR1]], i32** [[CP1]] - // CK19-USE-DAG: store i{{.+}} [[CSVAL1:%[^,]+]], i{{.+}}* [[S1]] + // CK19-USE-DAG: store i{{.+}} [[CSVAL1:%[^,]+]], i{{.+}}* getelementptr inbounds ([2 x i64], [2 x i64]* [[SIZE16]], i32 0, i32 1), // CK19-USE-DAG: [[CSVAL1]] = {{mul nuw i64 %.*, 4|sext i32 .+ to i64}} + // CK19-NOUSE-DAG: [[GEPS]] = getelementptr inbounds {{.+}}[[S:%[^,]+]] // CK19-NOUSE-DAG: [[BP0:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 0 // CK19-NOUSE-DAG: [[P0:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 0 // CK19-NOUSE-DAG: [[S0:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 0 @@ -766,31 +772,29 @@ } // Region 19 - // CK19-DAG: call i32 @__tgt_target_mapper(%struct.ident_t* @{{.+}}, i64 {{[^,]+}}, i8* {{[^,]+}}, i32 {{1|2}}, i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], i64* [[GEPS:%.+]], {{.+}}getelementptr {{.+}}[{{1|2}} x i{{.+}}]* [[MTYPE19]]{{.+}}, i8** null, i8** null) + // CK19-USE-DAG: call i32 @__tgt_target_mapper(%struct.ident_t* @{{.+}}, i64 {{[^,]+}}, i8* {{[^,]+}}, i32 {{1|2}}, i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], {{.+}}getelementptr {{.+}}[2 x i{{.+}}]* [[SIZE19]]{{.+}}, {{.+}}getelementptr {{.+}}[2 x 
i{{.+}}]* [[MTYPE19]]{{.+}}, i8** null, i8** null) + // CK19-NOUSE-DAG: call i32 @__tgt_target_mapper(%struct.ident_t* @{{.+}}, i64 {{[^,]+}}, i8* {{[^,]+}}, i32 {{1|2}}, i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], i64* [[GEPS:%.+]], {{.+}}getelementptr {{.+}}[1 x i{{.+}}]* [[MTYPE19]]{{.+}}, i8** null, i8** null) // CK19-DAG: [[GEPBP]] = getelementptr inbounds {{.+}}[[BP:%[^,]+]] // CK19-DAG: [[GEPP]] = getelementptr inbounds {{.+}}[[P:%[^,]+]] - // CK19-DAG: [[GEPS]] = getelementptr inbounds {{.+}}[[S:%[^,]+]] // CK19-USE-DAG: [[BP0:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 0 // CK19-USE-DAG: [[P0:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 0 - // CK19-USE-DAG: [[S0:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 0 // CK19-USE-DAG: [[CBP0:%.+]] = bitcast i8** [[BP0]] to i[[Z]]* // CK19-USE-DAG: [[CP0:%.+]] = bitcast i8** [[P0]] to i[[Z]]* // CK19-USE-DAG: store i[[Z]] {{%.+}}, i[[Z]]* [[CBP0]] // CK19-USE-DAG: store i[[Z]] {{%.+}}, i[[Z]]* [[CP0]] - // CK19-USE-DAG: store i{{.+}} {{8|4}}, i{{.+}}* [[S0]] // CK19-USE-DAG: [[BP1:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 1 // CK19-USE-DAG: [[P1:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 1 - // CK19-USE-DAG: [[S1:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 1 // CK19-USE-DAG: [[CBP1:%.+]] = bitcast i8** [[BP1]] to i32** // CK19-USE-DAG: [[CP1:%.+]] = bitcast i8** [[P1]] to i32** // CK19-USE-DAG: store i32* [[VAR1:%.+]], i32** [[CBP1]] // CK19-USE-DAG: store i32* [[SEC1:%.+]], i32** [[CP1]] - // CK19-USE-DAG: store i{{.+}} [[CSVAL1:%[^,]+]], i{{.+}}* [[S1]] + // CK19-USE-DAG: store i{{.+}} [[CSVAL1:%[^,]+]], i{{.+}}* getelementptr inbounds ([2 x i64], [2 x i64]* [[SIZE19]], i32 0, i32 1), // CK19-USE-DAG: [[CSVAL1]] = {{mul nuw i64 %.*, 4|sext i32 .+ to i64}} // CK19-USE-DAG: [[SEC1]] = getelementptr {{.*}}i32* [[VAR1]], i{{.+}} 0 + // CK19-NOUSE-DAG: [[GEPS]] = getelementptr inbounds {{.+}}[[S:%[^,]+]] // CK19-NOUSE-DAG: [[BP0:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 0 // CK19-NOUSE-DAG: [[P0:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 0 // CK19-NOUSE-DAG: [[S0:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 0 @@ -849,31 +853,29 @@ } // Region 21 - // CK19-DAG: call i32 @__tgt_target_mapper(%struct.ident_t* @{{.+}}, i64 {{[^,]+}}, i8* {{[^,]+}}, i32 {{1|2}}, i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], i64* [[GEPS:%.+]], {{.+}}getelementptr {{.+}}[{{1|2}} x i{{.+}}]* [[MTYPE21]]{{.+}}, i8** null, i8** null) + // CK19-USE-DAG: call i32 @__tgt_target_mapper(%struct.ident_t* @{{.+}}, i64 {{[^,]+}}, i8* {{[^,]+}}, i32 {{1|2}}, i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], {{.+}}getelementptr {{.+}}[2 x i{{.+}}]* [[SIZE21]]{{.+}}, {{.+}}getelementptr {{.+}}[2 x i{{.+}}]* [[MTYPE21]]{{.+}}, i8** null, i8** null) + // CK19-NOUSE-DAG: call i32 @__tgt_target_mapper(%struct.ident_t* @{{.+}}, i64 {{[^,]+}}, i8* {{[^,]+}}, i32 {{1|2}}, i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], i64* [[GEPS:%.+]], {{.+}}getelementptr {{.+}}[1 x i{{.+}}]* [[MTYPE21]]{{.+}}, i8** null, i8** null) // CK19-DAG: [[GEPBP]] = getelementptr inbounds {{.+}}[[BP:%[^,]+]] // CK19-DAG: [[GEPP]] = getelementptr inbounds {{.+}}[[P:%[^,]+]] - // CK19-DAG: [[GEPS]] = getelementptr inbounds {{.+}}[[S:%[^,]+]] // CK19-USE-DAG: [[BP0:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 0 // CK19-USE-DAG: [[P0:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 0 - // CK19-USE-DAG: [[S0:%.+]] = getelementptr inbounds 
{{.+}}[[S]], i{{.+}} 0, i{{.+}} 0 // CK19-USE-DAG: [[CBP0:%.+]] = bitcast i8** [[BP0]] to i[[Z]]* // CK19-USE-DAG: [[CP0:%.+]] = bitcast i8** [[P0]] to i[[Z]]* // CK19-USE-DAG: store i[[Z]] {{%.+}}, i[[Z]]* [[CBP0]] // CK19-USE-DAG: store i[[Z]] {{%.+}}, i[[Z]]* [[CP0]] - // CK19-USE-DAG: store i{{.+}} {{8|4}}, i{{.+}}* [[S0]] // CK19-USE-DAG: [[BP1:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 1 // CK19-USE-DAG: [[P1:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 1 - // CK19-USE-DAG: [[S1:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 1 // CK19-USE-DAG: [[CBP1:%.+]] = bitcast i8** [[BP1]] to i32** // CK19-USE-DAG: [[CP1:%.+]] = bitcast i8** [[P1]] to i32** // CK19-USE-DAG: store i32* [[VAR1:%.+]], i32** [[CBP1]] // CK19-USE-DAG: store i32* [[SEC1:%.+]], i32** [[CP1]] - // CK19-USE-DAG: store i{{.+}} [[CSVAL1:%[^,]+]], i{{.+}}* [[S1]] + // CK19-USE-DAG: store i{{.+}} [[CSVAL1:%[^,]+]], i{{.+}}* getelementptr inbounds ([2 x i64], [2 x i64]* [[SIZE21]], i32 0, i32 1), // CK19-USE-DAG: [[CSVAL1]] = {{mul nuw i64 %.*, 4|sext i32 .+ to i64}} // CK19-USE-DAG: [[SEC1]] = getelementptr {{.*}}i32* [[VAR1]], i{{.+}} %{{.+}} + // CK19-NOUSE-DAG: [[GEPS]] = getelementptr inbounds {{.+}}[[S:%[^,]+]] // CK19-NOUSE-DAG: [[BP0:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 0 // CK19-NOUSE-DAG: [[P0:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 0 // CK19-NOUSE-DAG: [[S0:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 0 @@ -1150,52 +1152,46 @@ double mva[23][ii][ii+5]; // Region 30 - // CK19-DAG: call i32 @__tgt_target_mapper(%struct.ident_t* @{{.+}}, i64 {{[^,]+}}, i8* {{[^,]+}}, i32 {{1|4}}, i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], i64* [[GEPS:%.+]], {{.+}}getelementptr {{.+}}[{{1|4}} x i{{.+}}]* [[MTYPE30]]{{.+}}, i8** null, i8** null) + // CK19-USE-DAG: call i32 @__tgt_target_mapper(%struct.ident_t* @{{.+}}, i64 {{[^,]+}}, i8* {{[^,]+}}, i32 {{1|4}}, i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], {{.+}}getelementptr {{.+}}[4 x i{{.+}}]* [[SIZE30]]{{.+}}, {{.+}}getelementptr {{.+}}[{{1|4}} x i{{.+}}]* [[MTYPE30]]{{.+}}, i8** null, i8** null) + // CK19-NOUSE-DAG: call i32 @__tgt_target_mapper(%struct.ident_t* @{{.+}}, i64 {{[^,]+}}, i8* {{[^,]+}}, i32 {{1|4}}, i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], i64* [[GEPS:%.+]], {{.+}}getelementptr {{.+}}[1 x i{{.+}}]* [[MTYPE30]]{{.+}}, i8** null, i8** null) // CK19-DAG: [[GEPBP]] = getelementptr inbounds {{.+}}[[BP:%[^,]+]] // CK19-DAG: [[GEPP]] = getelementptr inbounds {{.+}}[[P:%[^,]+]] - // CK19-DAG: [[GEPS]] = getelementptr inbounds {{.+}}[[S:%[^,]+]] // // CK19-USE-DAG: [[BP0:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 0 // CK19-USE-DAG: [[P0:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 0 - // CK19-USE-DAG: [[S0:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 0 // CK19-USE-DAG: [[CBP0:%.+]] = bitcast i8** [[BP0]] to i[[Z]]* // CK19-USE-DAG: [[CP0:%.+]] = bitcast i8** [[P0]] to i[[Z]]* // CK19-USE-DAG: store i[[Z]] 23, i[[Z]]* [[CBP0]] // CK19-USE-DAG: store i[[Z]] 23, i[[Z]]* [[CP0]] - // CK19-USE-DAG: store i64 {{8|4}}, i64* [[S0]] // // CK19-USE-DAG: [[BP1:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 1 // CK19-USE-DAG: [[P1:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 1 - // CK19-USE-DAG: [[S1:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 1 // CK19-USE-DAG: [[CBP1:%.+]] = bitcast i8** [[BP1]] to i[[Z]]* // CK19-USE-DAG: [[CP1:%.+]] = bitcast i8** [[P1]] to i[[Z]]* 
// CK19-USE-DAG: store i[[Z]] [[VAR1:%.+]], i[[Z]]* [[CBP1]] // CK19-USE-DAG: store i[[Z]] [[VAR11:%.+]], i[[Z]]* [[CP1]] - // CK19-USE-DAG: store i64 {{8|4}}, i64* [[S1]] // CK19-64-USE-DAG: [[VAR1]] = zext i32 %{{[^,]+}} to i64 // CK19-64-USE-DAG: [[VAR11]] = zext i32 %{{[^,]+}} to i64 // // CK19-USE-DAG: [[BP2:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 2 // CK19-USE-DAG: [[P2:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 2 - // CK19-USE-DAG: [[S2:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 2 // CK19-USE-DAG: [[CBP2:%.+]] = bitcast i8** [[BP2]] to i[[Z]]* // CK19-USE-DAG: [[CP2:%.+]] = bitcast i8** [[P2]] to i[[Z]]* // CK19-USE-DAG: store i[[Z]] [[VAR2:%.+]], i[[Z]]* [[CBP2]] // CK19-USE-DAG: store i[[Z]] [[VAR22:%.+]], i[[Z]]* [[CP2]] - // CK19-USE-DAG: store i64 {{8|4}}, i64* [[S2]] // CK19-64-USE-DAG: [[VAR2]] = zext i32 %{{[^,]+}} to i64 // CK19-64-USE-DAG: [[VAR22]] = zext i32 %{{[^,]+}} to i64 // // CK19-USE-DAG: [[BP3:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 3 // CK19-USE-DAG: [[P3:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 3 - // CK19-USE-DAG: [[S3:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 3 // CK19-USE-DAG: [[CBP3:%.+]] = bitcast i8** [[BP3]] to double** // CK19-USE-DAG: [[CP3:%.+]] = bitcast i8** [[P3]] to double** // CK19-USE-DAG: store double* [[VAR3:%.+]], double** [[CBP3]] // CK19-USE-DAG: store double* [[VAR3]], double** [[CP3]] - // CK19-USE-DAG: store i64 [[CSVAL3:%[^,]+]], i64* [[S3]] + // CK19-USE-DAG: store i64 [[CSVAL3:%[^,]+]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* [[SIZE30]], i32 0, i32 3), // CK19-USE-DAG: [[CSVAL3]] = {{mul nuw i64 %[^,]+, 8|sext i32 .+ to i64}} + // CK19-NOUSE-DAG: [[GEPS]] = getelementptr inbounds {{.+}}[[S:%[^,]+]] // CK19-NOUSE-DAG: [[BP0:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 0 // CK19-NOUSE-DAG: [[P0:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 0 // CK19-NOUSE-DAG: [[S0:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 0 @@ -1397,39 +1393,35 @@ } // Region 37 - // CK19-DAG: call i32 @__tgt_target_mapper(%struct.ident_t* @{{.+}}, i64 {{[^,]+}}, i8* {{[^,]+}}, i32 {{1|3}}, i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], i64* [[GEPS:%.+]], {{.+}}getelementptr {{.+}}[{{1|3}} x i{{.+}}]* [[MTYPE37]]{{.+}}, i8** null, i8** null) + // CK19-USE-DAG: call i32 @__tgt_target_mapper(%struct.ident_t* @{{.+}}, i64 {{[^,]+}}, i8* {{[^,]+}}, i32 {{1|3}}, i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], {{.+}}getelementptr {{.+}}[3 x i{{.+}}]* [[SIZE37]]{{.+}}, {{.+}}getelementptr {{.+}}[3 x i{{.+}}]* [[MTYPE37]]{{.+}}, i8** null, i8** null) + // CK19-NOUSE-DAG: call i32 @__tgt_target_mapper(%struct.ident_t* @{{.+}}, i64 {{[^,]+}}, i8* {{[^,]+}}, i32 {{1|3}}, i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], i64* [[GEPS:%.+]], {{.+}}getelementptr {{.+}}[1 x i{{.+}}]* [[MTYPE37]]{{.+}}, i8** null, i8** null) // CK19-DAG: [[GEPBP]] = getelementptr inbounds {{.+}}[[BP:%[^,]+]] // CK19-DAG: [[GEPP]] = getelementptr inbounds {{.+}}[[P:%[^,]+]] - // CK19-DAG: [[GEPS]] = getelementptr inbounds {{.+}}[[S:%[^,]+]] // // CK19-USE-DAG: [[BP0:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 0 // CK19-USE-DAG: [[P0:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 0 - // CK19-USE-DAG: [[S0:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 0 // CK19-USE-DAG: [[CBP0:%.+]] = bitcast i8** [[BP0]] to i[[Z]]* // CK19-USE-DAG: [[CP0:%.+]] = bitcast i8** [[P0]] to i[[Z]]* // 
CK19-USE-DAG: store i[[Z]] 11, i[[Z]]* [[CBP0]] // CK19-USE-DAG: store i[[Z]] 11, i[[Z]]* [[CP0]] - // CK19-USE-DAG: store i64 {{8|4}}, i64* [[S0]] // // CK19-USE-DAG: [[BP1:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 1 // CK19-USE-DAG: [[P1:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 1 - // CK19-USE-DAG: [[S1:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 1 // CK19-USE-DAG: [[CBP1:%.+]] = bitcast i8** [[BP1]] to i[[Z]]* // CK19-USE-DAG: [[CP1:%.+]] = bitcast i8** [[P1]] to i[[Z]]* // CK19-USE-DAG: store i[[Z]] [[VAR1:%.+]], i[[Z]]* [[CBP1]] // CK19-USE-DAG: store i[[Z]] [[VAR11:%.+]], i[[Z]]* [[CP1]] - // CK19-USE-DAG: store i64 {{8|4}}, i64* [[S1]] // // CK19-USE-DAG: [[BP2:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 2 // CK19-USE-DAG: [[P2:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 2 - // CK19-USE-DAG: [[S2:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 2 // CK19-USE-DAG: [[CBP2:%.+]] = bitcast i8** [[BP2]] to [13 x double]** // CK19-USE-DAG: [[CP2:%.+]] = bitcast i8** [[P2]] to [13 x double]** // CK19-USE-DAG: store [13 x double]* [[VAR2:%.+]], [13 x double]** [[CBP2]] // CK19-USE-DAG: store [13 x double]* [[VAR2]], [13 x double]** [[CP2]] - // CK19-USE-DAG: store i64 [[CSVAL2:%[^,]+]], i64* [[S2]] + // CK19-USE-DAG: store i64 [[CSVAL2:%[^,]+]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* [[SIZE37]], i32 0, i32 2), // CK19-USE-DAG: [[CSVAL2]] = {{mul nuw i64 %[^,]+, 104|sext i32 .+ to i64}} + // CK19-NOUSE-DAG: [[GEPS]] = getelementptr inbounds {{.+}}[[S:%[^,]+]] // CK19-NOUSE-DAG: [[BP0:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 0 // CK19-NOUSE-DAG: [[P0:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 0 // CK19-NOUSE-DAG: [[S0:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 0 @@ -1450,41 +1442,37 @@ } // Region 38 - // CK19-DAG: call i32 @__tgt_target_mapper(%struct.ident_t* @{{.+}}, i64 {{[^,]+}}, i8* {{[^,]+}}, i32 {{1|3}}, i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], i64* [[GEPS:%.+]], {{.+}}getelementptr {{.+}}[{{1|3}} x i{{.+}}]* [[MTYPE38]]{{.+}}, i8** null, i8** null) + // CK19-USE-DAG: call i32 @__tgt_target_mapper(%struct.ident_t* @{{.+}}, i64 {{[^,]+}}, i8* {{[^,]+}}, i32 {{1|3}}, i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], {{.+}}getelementptr {{.+}}[3 x i{{.+}}]* [[SIZE38]]{{.+}}, {{.+}}getelementptr {{.+}}[3 x i{{.+}}]* [[MTYPE38]]{{.+}}, i8** null, i8** null) + // CK19-NOUSE-DAG: call i32 @__tgt_target_mapper(%struct.ident_t* @{{.+}}, i64 {{[^,]+}}, i8* {{[^,]+}}, i32 {{1|3}}, i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], i64* [[GEPS:%.+]], {{.+}}getelementptr {{.+}}[1 x i{{.+}}]* [[MTYPE38]]{{.+}}, i8** null, i8** null) // CK19-DAG: [[GEPBP]] = getelementptr inbounds {{.+}}[[BP:%[^,]+]] // CK19-DAG: [[GEPP]] = getelementptr inbounds {{.+}}[[P:%[^,]+]] - // CK19-DAG: [[GEPS]] = getelementptr inbounds {{.+}}[[S:%[^,]+]] // // CK19-USE-DAG: [[BP0:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 0 // CK19-USE-DAG: [[P0:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 0 - // CK19-USE-DAG: [[S0:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 0 // CK19-USE-DAG: [[CBP0:%.+]] = bitcast i8** [[BP0]] to i[[Z]]* // CK19-USE-DAG: [[CP0:%.+]] = bitcast i8** [[P0]] to i[[Z]]* // CK19-USE-DAG: store i[[Z]] 11, i[[Z]]* [[CBP0]] // CK19-USE-DAG: store i[[Z]] 11, i[[Z]]* [[CP0]] - // CK19-USE-DAG: store i64 {{8|4}}, i64* [[S0]] // // CK19-USE-DAG: [[BP1:%.+]] = getelementptr inbounds {{.+}}[[BP]], 
i{{.+}} 0, i{{.+}} 1 // CK19-USE-DAG: [[P1:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 1 - // CK19-USE-DAG: [[S1:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 1 // CK19-USE-DAG: [[CBP1:%.+]] = bitcast i8** [[BP1]] to i[[Z]]* // CK19-USE-DAG: [[CP1:%.+]] = bitcast i8** [[P1]] to i[[Z]]* // CK19-USE-DAG: store i[[Z]] [[VAR1:%.+]], i[[Z]]* [[CBP1]] // CK19-USE-DAG: store i[[Z]] [[VAR11:%.+]], i[[Z]]* [[CP1]] - // CK19-USE-DAG: store i64 {{8|4}}, i64* [[S1]] // // CK19-USE-DAG: [[BP2:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 2 // CK19-USE-DAG: [[P2:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 2 - // CK19-USE-DAG: [[S2:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 2 // CK19-USE-DAG: [[CBP2:%.+]] = bitcast i8** [[BP2]] to [13 x double]** // CK19-USE-DAG: [[CP2:%.+]] = bitcast i8** [[P2]] to [13 x double]** // CK19-USE-DAG: store [13 x double]* [[VAR2:%.+]], [13 x double]** [[CBP2]] // CK19-USE-DAG: store [13 x double]* [[SEC2:%.+]], [13 x double]** [[CP2]] - // CK19-USE-DAG: store i64 [[CSVAL2:%[^,]+]], i64* [[S2]] + // CK19-USE-DAG: store i64 [[CSVAL2:%[^,]+]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* [[SIZE38]], i32 0, i32 2), // CK19-USE-DAG: [[SEC2]] = getelementptr {{.+}}[13 x double]* [[VAR2]], i[[Z]] [[SEC22:%[^,]+]] // CK19-USE-DAG: [[SEC22]] = mul nsw i[[Z]] 0, %{{[^,]+}} // CK19-USE-DAG: [[CSVAL2]] = {{mul nuw i64 %[^,]+, 104|sext i32 .+ to i64}} + // CK19-NOUSE-DAG: [[GEPS]] = getelementptr inbounds {{.+}}[[S:%[^,]+]] // CK19-NOUSE-DAG: [[BP0:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 0 // CK19-NOUSE-DAG: [[P0:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 0 // CK19-NOUSE-DAG: [[S0:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 0 @@ -1507,41 +1495,37 @@ } // Region 39 - // CK19-DAG: call i32 @__tgt_target_mapper(%struct.ident_t* @{{.+}}, i64 {{[^,]+}}, i8* {{[^,]+}}, i32 {{1|3}}, i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], i64* [[GEPS:%.+]], {{.+}}getelementptr {{.+}}[{{1|3}} x i{{.+}}]* [[MTYPE39]]{{.+}}, i8** null, i8** null) + // CK19-USE-DAG: call i32 @__tgt_target_mapper(%struct.ident_t* @{{.+}}, i64 {{[^,]+}}, i8* {{[^,]+}}, i32 {{1|3}}, i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], {{.+}}getelementptr {{.+}}[3 x i{{.+}}]* [[SIZE39]]{{.+}}, {{.+}}getelementptr {{.+}}[3 x i{{.+}}]* [[MTYPE39]]{{.+}}, i8** null, i8** null) + // CK19-NOUSE-DAG: call i32 @__tgt_target_mapper(%struct.ident_t* @{{.+}}, i64 {{[^,]+}}, i8* {{[^,]+}}, i32 {{1|3}}, i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], i64* [[GEPS:%.+]], {{.+}}getelementptr {{.+}}[1 x i{{.+}}]* [[MTYPE39]]{{.+}}, i8** null, i8** null) // CK19-DAG: [[GEPBP]] = getelementptr inbounds {{.+}}[[BP:%[^,]+]] // CK19-DAG: [[GEPP]] = getelementptr inbounds {{.+}}[[P:%[^,]+]] - // CK19-DAG: [[GEPS]] = getelementptr inbounds {{.+}}[[S:%[^,]+]] // // CK19-USE-DAG: [[BP0:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 0 // CK19-USE-DAG: [[P0:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 0 - // CK19-USE-DAG: [[S0:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 0 // CK19-USE-DAG: [[CBP0:%.+]] = bitcast i8** [[BP0]] to i[[Z]]* // CK19-USE-DAG: [[CP0:%.+]] = bitcast i8** [[P0]] to i[[Z]]* // CK19-USE-DAG: store i[[Z]] 11, i[[Z]]* [[CBP0]] // CK19-USE-DAG: store i[[Z]] 11, i[[Z]]* [[CP0]] - // CK19-USE-DAG: store i64 {{8|4}}, i64* [[S0]] // // CK19-USE-DAG: [[BP1:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 1 // CK19-USE-DAG: [[P1:%.+]] = 
getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 1 - // CK19-USE-DAG: [[S1:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 1 // CK19-USE-DAG: [[CBP1:%.+]] = bitcast i8** [[BP1]] to i[[Z]]* // CK19-USE-DAG: [[CP1:%.+]] = bitcast i8** [[P1]] to i[[Z]]* // CK19-USE-DAG: store i[[Z]] [[VAR1:%.+]], i[[Z]]* [[CBP1]] // CK19-USE-DAG: store i[[Z]] [[VAR11:%.+]], i[[Z]]* [[CP1]] - // CK19-USE-DAG: store i64 {{8|4}}, i64* [[S1]] // // CK19-USE-DAG: [[BP2:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 2 // CK19-USE-DAG: [[P2:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 2 - // CK19-USE-DAG: [[S2:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 2 // CK19-USE-DAG: [[CBP2:%.+]] = bitcast i8** [[BP2]] to [13 x double]** // CK19-USE-DAG: [[CP2:%.+]] = bitcast i8** [[P2]] to [13 x double]** // CK19-USE-DAG: store [13 x double]* [[VAR2:%.+]], [13 x double]** [[CBP2]] // CK19-USE-DAG: store [13 x double]* [[SEC2:%.+]], [13 x double]** [[CP2]] - // CK19-USE-DAG: store i64 [[CSVAL2:%[^,]+]], i64* [[S2]] + // CK19-USE-DAG: store i64 [[CSVAL2:%[^,]+]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* [[SIZE39]], i32 0, i32 2), // CK19-USE-DAG: [[SEC2]] = getelementptr {{.+}}[13 x double]* [[VAR2]], i[[Z]] [[SEC22:%[^,]+]] // CK19-USE-DAG: [[SEC22]] = mul nsw i[[Z]] 0, %{{[^,]+}} // CK19-USE-DAG: [[CSVAL2]] = {{mul nuw i64 %[^,]+, 104|sext i32 .+ to i64}} + // CK19-NOUSE-DAG: [[GEPS]] = getelementptr inbounds {{.+}}[[S:%[^,]+]] // CK19-NOUSE-DAG: [[BP0:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 0 // CK19-NOUSE-DAG: [[P0:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 0 // CK19-NOUSE-DAG: [[S0:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 0 @@ -1564,41 +1548,37 @@ } // Region 40 - // CK19-DAG: call i32 @__tgt_target_mapper(%struct.ident_t* @{{.+}}, i64 {{[^,]+}}, i8* {{[^,]+}}, i32 {{1|3}}, i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], i64* [[GEPS:%.+]], {{.+}}getelementptr {{.+}}[{{1|3}} x i{{.+}}]* [[MTYPE40]]{{.+}}, i8** null, i8** null) + // CK19-USE-DAG: call i32 @__tgt_target_mapper(%struct.ident_t* @{{.+}}, i64 {{[^,]+}}, i8* {{[^,]+}}, i32 {{1|3}}, i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], {{.+}}getelementptr {{.+}}[3 x i{{.+}}]* [[SIZE40]]{{.+}}, {{.+}}getelementptr {{.+}}[3 x i{{.+}}]* [[MTYPE40]]{{.+}}, i8** null, i8** null) + // CK19-NOUSE-DAG: call i32 @__tgt_target_mapper(%struct.ident_t* @{{.+}}, i64 {{[^,]+}}, i8* {{[^,]+}}, i32 {{1|3}}, i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], i64* [[GEPS:%.+]], {{.+}}getelementptr {{.+}}[1 x i{{.+}}]* [[MTYPE40]]{{.+}}, i8** null, i8** null) // CK19-DAG: [[GEPBP]] = getelementptr inbounds {{.+}}[[BP:%[^,]+]] // CK19-DAG: [[GEPP]] = getelementptr inbounds {{.+}}[[P:%[^,]+]] - // CK19-DAG: [[GEPS]] = getelementptr inbounds {{.+}}[[S:%[^,]+]] // // CK19-USE-DAG: [[BP0:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 0 // CK19-USE-DAG: [[P0:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 0 - // CK19-USE-DAG: [[S0:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 0 // CK19-USE-DAG: [[CBP0:%.+]] = bitcast i8** [[BP0]] to i[[Z]]* // CK19-USE-DAG: [[CP0:%.+]] = bitcast i8** [[P0]] to i[[Z]]* // CK19-USE-DAG: store i[[Z]] 11, i[[Z]]* [[CBP0]] // CK19-USE-DAG: store i[[Z]] 11, i[[Z]]* [[CP0]] - // CK19-USE-DAG: store i64 {{8|4}}, i64* [[S0]] // // CK19-USE-DAG: [[BP1:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 1 // CK19-USE-DAG: [[P1:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 1 - 
// CK19-USE-DAG: [[S1:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 1 // CK19-USE-DAG: [[CBP1:%.+]] = bitcast i8** [[BP1]] to i[[Z]]* // CK19-USE-DAG: [[CP1:%.+]] = bitcast i8** [[P1]] to i[[Z]]* // CK19-USE-DAG: store i[[Z]] [[VAR1:%.+]], i[[Z]]* [[CBP1]] // CK19-USE-DAG: store i[[Z]] [[VAR11:%.+]], i[[Z]]* [[CP1]] - // CK19-USE-DAG: store i64 {{8|4}}, i64* [[S1]] // // CK19-USE-DAG: [[BP2:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 2 // CK19-USE-DAG: [[P2:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 2 - // CK19-USE-DAG: [[S2:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 2 // CK19-USE-DAG: [[CBP2:%.+]] = bitcast i8** [[BP2]] to [13 x double]** // CK19-USE-DAG: [[CP2:%.+]] = bitcast i8** [[P2]] to [13 x double]** // CK19-USE-DAG: store [13 x double]* [[VAR2:%.+]], [13 x double]** [[CBP2]] // CK19-USE-DAG: store [13 x double]* [[SEC2:%.+]], [13 x double]** [[CP2]] - // CK19-USE-DAG: store i64 [[CSVAL2:%[^,]+]], i64* [[S2]] + // CK19-USE-DAG: store i64 [[CSVAL2:%[^,]+]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* [[SIZE40]], i32 0, i32 2), // CK19-USE-DAG: [[SEC2]] = getelementptr {{.+}}[13 x double]* [[SEC22:%[^,]+]], i[[Z]] 0 // CK19-USE-DAG: [[SEC22]] = getelementptr {{.+}}[13 x double]* [[VAR2]], i[[Z]] [[SEC222:%[^,]+]] // CK19-USE-DAG: [[SEC222]] = mul nsw i[[Z]] 1, %{{[^,]+}} + // CK19-NOUSE-DAG: [[GEPS]] = getelementptr inbounds {{.+}}[[S:%[^,]+]] // CK19-NOUSE-DAG: [[BP0:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 0 // CK19-NOUSE-DAG: [[P0:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 0 // CK19-NOUSE-DAG: [[S0:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 0 diff --git a/clang/test/OpenMP/target_map_codegen_20.cpp b/clang/test/OpenMP/target_map_codegen_20.cpp --- a/clang/test/OpenMP/target_map_codegen_20.cpp +++ b/clang/test/OpenMP/target_map_codegen_20.cpp @@ -74,6 +74,7 @@ // CK21-NOUSE: [[MTYPE01:@.+]] = private {{.*}}constant [1 x i64] [i64 3] // CK21-LABEL: @.__omp_offloading_{{.*}}foo{{.*}}_l{{[0-9]+}}.region_id = weak constant i8 0 +// CK21: [[SIZE02:@.+]] = private {{.*}}global [2 x i64] [i64 0, i64 500] // CK21-USE: [[MTYPE02:@.+]] = private {{.*}}constant [2 x i64] [i64 32, i64 281474976710674] // CK21-NOUSE: [[MTYPE02:@.+]] = private {{.*}}constant [2 x i64] [i64 0, i64 281474976710674] @@ -88,6 +89,7 @@ // CK21-NOUSE: [[MTYPE04:@.+]] = private {{.*}}constant [1 x i64] [i64 2] // CK21-LABEL: @.__omp_offloading_{{.*}}foo{{.*}}_l{{[0-9]+}}.region_id = weak constant i8 0 +// CK21: [[SIZE05:@.+]] = private {{.*}}global [3 x i64] [i64 0, i64 4, i64 4] // CK21-USE: [[MTYPE05:@.+]] = private {{.*}}constant [3 x i64] [i64 32, i64 281474976710659, i64 281474976710659] // CK21-NOUSE: [[MTYPE05:@.+]] = private {{.*}}constant [3 x i64] [i64 0, i64 281474976710659, i64 281474976710659] @@ -150,29 +152,25 @@ } // Region 02 - // CK21-DAG: call i32 @__tgt_target_mapper(%struct.ident_t* @{{.+}}, i64 {{[^,]+}}, i8* {{[^,]+}}, i32 2, i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], i64* [[GEPS:%.+]], {{.+}}getelementptr {{.+}}[2 x i{{.+}}]* [[MTYPE02]]{{.+}}, i8** null, i8** null) + // CK21-DAG: call i32 @__tgt_target_mapper(%struct.ident_t* @{{.+}}, i64 {{[^,]+}}, i8* {{[^,]+}}, i32 2, i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], {{.+}}getelementptr {{.+}}[2 x i{{.+}}]* [[SIZE02]]{{.+}}, {{.+}}getelementptr {{.+}}[2 x i{{.+}}]* [[MTYPE02]]{{.+}}, i8** null, i8** null) // CK21-DAG: [[GEPBP]] = getelementptr inbounds {{.+}}[[BP:%[^,]+]] // CK21-DAG: [[GEPP]] = getelementptr 
inbounds {{.+}}[[P:%[^,]+]] - // CK21-DAG: [[GEPS]] = getelementptr inbounds {{.+}}[[S:%[^,]+]] // CK21-DAG: [[BP0:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 0 // CK21-DAG: [[P0:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 0 - // CK21-DAG: [[S0:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 0 // CK21-DAG: [[CBP0:%.+]] = bitcast i8** [[BP0]] to [[ST]]** // CK21-DAG: [[CP0:%.+]] = bitcast i8** [[P0]] to float*** // CK21-DAG: store [[ST]]* [[VAR0:%.+]], [[ST]]** [[CBP0]] // CK21-DAG: store float** [[SEC0:%.+]], float*** [[CP0]] - // CK21-DAG: store i64 {{%.+}}, i64* [[S0]] + // CK21-DAG: store i64 {{%.+}}, i64* getelementptr inbounds ([2 x i64], [2 x i64]* [[SIZE02]], i32 0, i32 0), // CK21-DAG: [[SEC0]] = getelementptr {{.*}}[[ST]]* [[VAR0]], i{{.+}} 0, i{{.+}} 2 // CK21-DAG: [[BP1:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 1 // CK21-DAG: [[P1:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 1 - // CK21-DAG: [[S1:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 1 // CK21-DAG: [[CBP1:%.+]] = bitcast i8** [[BP1]] to float*** // CK21-DAG: [[CP1:%.+]] = bitcast i8** [[P1]] to float** // CK21-DAG: store float** [[SEC0]], float*** [[CBP1]] // CK21-DAG: store float* [[SEC1:%.+]], float** [[CP1]] - // CK21-DAG: store i64 {{.+}}, i64* [[S1]] // CK21-DAG: [[SEC1]] = getelementptr {{.*}}float* [[RVAR1:%[^,]+]], i{{.+}} 123 // CK21-DAG: [[RVAR1]] = load float*, float** [[SEC1_:%[^,]+]] // CK21-DAG: [[SEC1_]] = getelementptr {{.*}}[[ST]]* [[VAR0]], i{{.+}} 0, i{{.+}} 2 @@ -230,38 +228,32 @@ // Make sure the extra flag is passed to the second map. // Region 05 - // CK21-DAG: call i32 @__tgt_target_mapper(%struct.ident_t* @{{.+}}, i64 {{[^,]+}}, i8* {{[^,]+}}, i32 3, i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], i64* [[GEPS:%.+]], {{.+}}getelementptr {{.+}}[3 x i{{.+}}]* [[MTYPE05]]{{.+}}, i8** null, i8** null) + // CK21-DAG: call i32 @__tgt_target_mapper(%struct.ident_t* @{{.+}}, i64 {{[^,]+}}, i8* {{[^,]+}}, i32 3, i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], {{.+}}getelementptr {{.+}}[3 x i{{.+}}]* [[SIZE05]]{{.+}}, {{.+}}getelementptr {{.+}}[3 x i{{.+}}]* [[MTYPE05]]{{.+}}, i8** null, i8** null) // CK21-DAG: [[GEPBP]] = getelementptr inbounds {{.+}}[[BP:%[^,]+]] // CK21-DAG: [[GEPP]] = getelementptr inbounds {{.+}}[[P:%[^,]+]] - // CK21-DAG: [[GEPS]] = getelementptr inbounds {{.+}}[[S:%[^,]+]] // CK21-DAG: [[BP0:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 0 // CK21-DAG: [[P0:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 0 - // CK21-DAG: [[S0:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 0 // CK21-DAG: [[CBP0:%.+]] = bitcast i8** [[BP0]] to [[ST]]** // CK21-DAG: [[CP0:%.+]] = bitcast i8** [[P0]] to i32** // CK21-DAG: store [[ST]]* [[VAR0:%.+]], [[ST]]** [[CBP0]] // CK21-DAG: store i32* [[SEC0:%.+]], i32** [[CP0]] - // CK21-DAG: store i64 {{%.+}}, i64* [[S0]] + // CK21-DAG: store i64 {{%.+}}, i64* getelementptr inbounds ([3 x i64], [3 x i64]* [[SIZE05]], i32 0, i32 0), // CK21-DAG: [[SEC0]] = getelementptr {{.*}}[[ST]]* [[VAR0]], i{{.+}} 0, i{{.+}} 0 // CK21-DAG: [[BP1:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 1 // CK21-DAG: [[P1:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 1 - // CK21-DAG: [[S1:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 1 // CK21-DAG: [[CBP1:%.+]] = bitcast i8** [[BP1]] to [[ST]]** // CK21-DAG: [[CP1:%.+]] = bitcast i8** [[P1]] to i32** // CK21-DAG: store [[ST]]* [[VAR0]], [[ST]]** 
[[CBP1]] // CK21-DAG: store i32* [[SEC0]], i32** [[CP1]] - // CK21-DAG: store i64 {{.+}}, i64* [[S1]] // CK21-DAG: [[BP2:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 2 // CK21-DAG: [[P2:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 2 - // CK21-DAG: [[S2:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 2 // CK21-DAG: [[CBP2:%.+]] = bitcast i8** [[BP2]] to [[ST]]** // CK21-DAG: [[CP2:%.+]] = bitcast i8** [[P2]] to i32** // CK21-DAG: store [[ST]]* [[VAR2:%.+]], [[ST]]** [[CBP2]] // CK21-DAG: store i32* [[SEC2:%.+]], i32** [[CP2]] - // CK21-DAG: store i64 {{.+}}, i64* [[S2]] // CK21-DAG: [[SEC2]] = getelementptr {{.*}}[[ST]]* [[VAR2]], i{{.+}} 0, i{{.+}} 1 // CK21-USE: call void [[CALL05:@.+]]([[ST]]* {{[^,]+}}) diff --git a/clang/test/OpenMP/target_map_codegen_23.cpp b/clang/test/OpenMP/target_map_codegen_23.cpp --- a/clang/test/OpenMP/target_map_codegen_23.cpp +++ b/clang/test/OpenMP/target_map_codegen_23.cpp @@ -73,9 +73,11 @@ // CK24: [[MTYPE15:@.+]] = private {{.*}}constant [1 x i64] [i64 35] // CK24-LABEL: @.__omp_offloading_{{.*}}explicit_maps_struct_fields{{.*}}_l{{[0-9]+}}.region_id = weak constant i8 0 +// CK24: [[SIZE16:@.+]] = private {{.*}}global [2 x i64] [i64 0, i64 20] // CK24: [[MTYPE16:@.+]] = private {{.*}}constant [2 x i64] [i64 32, i64 281474976710659] // CK24-LABEL: @.__omp_offloading_{{.*}}explicit_maps_struct_fields{{.*}}_l{{[0-9]+}}.region_id = weak constant i8 0 +// CK24: [[SIZE17:@.+]] = private {{.*}}global [2 x i64] [i64 0, i64 {{3560|2880}}] // CK24: [[MTYPE17:@.+]] = private {{.*}}constant [2 x i64] [i64 32, i64 281474976710675] // CK24-LABEL: @.__omp_offloading_{{.*}}explicit_maps_struct_fields{{.*}}_l{{[0-9]+}}.region_id = weak constant i8 0 @@ -83,21 +85,27 @@ // CK24: [[MTYPE18:@.+]] = private {{.*}}constant [1 x i64] [i64 35] // CK24-LABEL: @.__omp_offloading_{{.*}}explicit_maps_struct_fields{{.*}}_l{{[0-9]+}}.region_id = weak constant i8 0 +// CK24: [[SIZE19:@.+]] = private {{.*}}global [3 x i64] [i64 0, i64 {{8|4}}, i64 4] // CK24: [[MTYPE19:@.+]] = private {{.*}}constant [3 x i64] [i64 32, i64 281474976710659, i64 281474976710675] // CK24-LABEL: @.__omp_offloading_{{.*}}explicit_maps_struct_fields{{.*}}_l{{[0-9]+}}.region_id = weak constant i8 0 +// CK24: [[SIZE20:@.+]] = private {{.*}}global [2 x i64] [i64 0, i64 4] // CK24: [[MTYPE20:@.+]] = private {{.*}}constant [2 x i64] [i64 32, i64 281474976710675] // CK24-LABEL: @.__omp_offloading_{{.*}}explicit_maps_struct_fields{{.*}}_l{{[0-9]+}}.region_id = weak constant i8 0 +// CK24: [[SIZE21:@.+]] = private {{.*}}global [3 x i64] [i64 0, i64 {{8|4}}, i64 4] // CK24: [[MTYPE21:@.+]] = private {{.*}}constant [3 x i64] [i64 32, i64 281474976710659, i64 281474976710675] // CK24-LABEL: @.__omp_offloading_{{.*}}explicit_maps_struct_fields{{.*}}_l{{[0-9]+}}.region_id = weak constant i8 0 +// CK24: [[SIZE22:@.+]] = private {{.*}}global [2 x i64] [i64 0, i64 8] // CK24: [[MTYPE22:@.+]] = private {{.*}}constant [2 x i64] [i64 32, i64 281474976710659] // CK24-LABEL: @.__omp_offloading_{{.*}}explicit_maps_struct_fields{{.*}}_l{{[0-9]+}}.region_id = weak constant i8 0 +// CK24: [[SIZE23:@.+]] = private {{.*}}global [3 x i64] [i64 0, i64 {{8|4}}, i64 8] // CK24: [[MTYPE23:@.+]] = private {{.*}}constant [3 x i64] [i64 32, i64 281474976710659, i64 281474976710675] // CK24-LABEL: @.__omp_offloading_{{.*}}explicit_maps_struct_fields{{.*}}_l{{[0-9]+}}.region_id = weak constant i8 0 +// CK24: [[SIZE24:@.+]] = private {{.*}}global [4 x i64] [i64 0, i64 {{8|4}}, i64 {{8|4}}, i64 
4] // CK24: [[MTYPE24:@.+]] = private {{.*}}constant [4 x i64] [i64 32, i64 281474976710672, i64 16, i64 19] // CK24-LABEL: explicit_maps_struct_fields @@ -189,30 +197,26 @@ { p->a++; } // Region 16 -// CK24-DAG: call i32 @__tgt_target_mapper(%struct.ident_t* @{{.+}}, i64 {{[^,]+}}, i8* {{[^,]+}}, i32 2, i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], i64* [[GEPS:%.+]], {{.+}}getelementptr {{.+}}[2 x i{{.+}}]* [[MTYPE16]]{{.+}}, i8** null) +// CK24-DAG: call i32 @__tgt_target_mapper(%struct.ident_t* @{{.+}}, i64 {{[^,]+}}, i8* {{[^,]+}}, i32 2, i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], {{.+}}getelementptr {{.+}}[2 x i{{.+}}]* [[SIZE16]]{{.+}}, {{.+}}getelementptr {{.+}}[2 x i{{.+}}]* [[MTYPE16]]{{.+}}, i8** null) // CK24-DAG: [[GEPBP]] = getelementptr inbounds {{.+}}[[BP:%[^,]+]] // CK24-DAG: [[GEPP]] = getelementptr inbounds {{.+}}[[P:%[^,]+]] -// CK24-DAG: [[GEPS]] = getelementptr inbounds {{.+}}[[S:%[^,]+]] // CK24-DAG: [[BP0:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 0 // CK24-DAG: [[P0:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 0 -// CK24-DAG: [[S0:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 0 // CK24-DAG: [[CBP0:%.+]] = bitcast i8** [[BP0]] to [[SC]]** // CK24-DAG: [[CP0:%.+]] = bitcast i8** [[P0]] to i32** // CK24-DAG: store [[SC]]* [[VAR0:%.+]], [[SC]]** [[CBP0]] // CK24-DAG: store i32* [[SEC0:%.+]], i32** [[CP0]] -// CK24-DAG: store i64 {{%.+}}, i64* [[S0]] +// CK24-DAG: store i64 {{%.+}}, i64* getelementptr inbounds ([2 x i64], [2 x i64]* [[SIZE16]], i32 0, i32 0), // CK24-DAG: [[SEC0]] = getelementptr {{.*}}[10 x i32]* [[SEC00:%[^,]+]], i{{.+}} 0, i{{.+}} 0 // CK24-DAG: [[SEC00]] = getelementptr {{.*}}[[SC]]* [[VAR00:%.+]], i{{.+}} 0, i{{.+}} 3 // CK24-DAG: [[BP1:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 1 // CK24-DAG: [[P1:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 1 -// CK24-DAG: [[S1:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 1 // CK24-DAG: [[CBP1:%.+]] = bitcast i8** [[BP1]] to [[SC]]** // CK24-DAG: [[CP1:%.+]] = bitcast i8** [[P1]] to i32** // CK24-DAG: store [[SC]]* [[VAR0]], [[SC]]** [[CBP1]] // CK24-DAG: store i32* [[SEC0]], i32** [[CP1]] -// CK24-DAG: store i64 {{.+}}, i64* [[S1]] // CK24-DAG: [[VAR0]] = load [[SC]]*, [[SC]]** %{{.+}} // CK24-DAG: [[VAR00]] = load [[SC]]*, [[SC]]** %{{.+}} @@ -222,29 +226,25 @@ { p->a++; } // Region 17 -// CK24-DAG: call i32 @__tgt_target_mapper(%struct.ident_t* @{{.+}}, i64 {{[^,]+}}, i8* {{[^,]+}}, i32 2, i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], i64* [[GEPS:%.+]], {{.+}}getelementptr {{.+}}[2 x i{{.+}}]* [[MTYPE17]]{{.+}}, i8** null) +// CK24-DAG: call i32 @__tgt_target_mapper(%struct.ident_t* @{{.+}}, i64 {{[^,]+}}, i8* {{[^,]+}}, i32 2, i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], {{.+}}getelementptr {{.+}}[2 x i{{.+}}]* [[SIZE17]]{{.+}}, {{.+}}getelementptr {{.+}}[2 x i{{.+}}]* [[MTYPE17]]{{.+}}, i8** null) // CK24-DAG: [[GEPBP]] = getelementptr inbounds {{.+}}[[BP:%[^,]+]] // CK24-DAG: [[GEPP]] = getelementptr inbounds {{.+}}[[P:%[^,]+]] -// CK24-DAG: [[GEPS]] = getelementptr inbounds {{.+}}[[S:%[^,]+]] // CK24-DAG: [[BP0:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 0 // CK24-DAG: [[P0:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 0 -// CK24-DAG: [[S0:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 0 // CK24-DAG: [[CBP0:%.+]] = bitcast i8** [[BP0]] to [[SC]]** // CK24-DAG: [[CP0:%.+]] = bitcast i8** [[P0]] to [[SB]]*** // CK24-DAG: store [[SC]]* [[VAR0:%.+]], [[SC]]** [[CBP0]] // 
CK24-DAG: store [[SB]]** [[SEC0:%.+]], [[SB]]*** [[CP0]] -// CK24-DAG: store i64 {{%.+}}, i64* [[S0]] +// CK24-DAG: store i64 {{%.+}}, i64* getelementptr inbounds ([2 x i64], [2 x i64]* [[SIZE17]], i32 0, i32 0), // CK24-DAG: [[SEC0]] = getelementptr {{.*}}[[SC]]* [[VAR00:%.+]], i{{.+}} 0, i{{.+}} 2 // CK24-DAG: [[BP1:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 1 // CK24-DAG: [[P1:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 1 -// CK24-DAG: [[S1:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 1 // CK24-DAG: [[CBP1:%.+]] = bitcast i8** [[BP1]] to [[SB]]*** // CK24-DAG: [[CP1:%.+]] = bitcast i8** [[P1]] to [[SB]]** // CK24-DAG: store [[SB]]** [[SEC0]], [[SB]]*** [[CBP1]] // CK24-DAG: store [[SB]]* [[SEC1:%.+]], [[SB]]** [[CP1]] -// CK24-DAG: store i64 {{.+}}, i64* [[S1]] // CK24-DAG: [[SEC1]] = getelementptr {{.*}}[[SB]]* [[SEC11:%[^,]+]], i{{.+}} 0 // CK24-DAG: [[SEC11]] = load [[SB]]*, [[SB]]** [[SEC111:%[^,]+]], // CK24-DAG: [[SEC111]] = getelementptr {{.*}}[[SC]]* [[VAR000:%.+]], i{{.+}} 0, i{{.+}} 2 @@ -281,31 +281,27 @@ { p->a++; } // Region 19 -// CK24-DAG: call i32 @__tgt_target_mapper(%struct.ident_t* @{{.+}}, i64 {{[^,]+}}, i8* {{[^,]+}}, i32 3, i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], i64* [[GEPS:%.+]], {{.+}}getelementptr {{.+}}[3 x i{{.+}}]* [[MTYPE19]]{{.+}}, i8** null) +// CK24-DAG: call i32 @__tgt_target_mapper(%struct.ident_t* @{{.+}}, i64 {{[^,]+}}, i8* {{[^,]+}}, i32 3, i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], {{.+}}getelementptr {{.+}}[3 x i{{.+}}]* [[SIZE19]]{{.+}}, {{.+}}getelementptr {{.+}}[3 x i{{.+}}]* [[MTYPE19]]{{.+}}, i8** null) // CK24-DAG: [[GEPBP]] = getelementptr inbounds {{.+}}[[BP:%[^,]+]] // CK24-DAG: [[GEPP]] = getelementptr inbounds {{.+}}[[P:%[^,]+]] -// CK24-DAG: [[GEPS]] = getelementptr inbounds {{.+}}[[S:%[^,]+]] // CK24-DAG: [[BP0:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 0 // CK24-DAG: [[P0:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 0 -// CK24-DAG: [[S0:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 0 // CK24-DAG: [[CBP0:%.+]] = bitcast i8** [[BP0]] to [[SC]]** // CK24-DAG: [[CP0:%.+]] = bitcast i8** [[P0]] to [[SA]]*** // CK24-DAG: store [[SC]]* [[VAR0:%.+]], [[SC]]** [[CBP0]] // CK24-DAG: store [[SA]]** [[SEC0:%.+]], [[SA]]*** [[CP0]] -// CK24-DAG: store i64 {{%.+}}, i64* [[S0]] +// CK24-DAG: store i64 {{%.+}}, i64* getelementptr inbounds ([3 x i64], [3 x i64]* [[SIZE19]], i32 0, i32 0), // CK24-DAG: [[SEC0]] = getelementptr {{.*}}[10 x [[SA]]*]* [[SEC00:%[^,]+]], i{{.+}} 0, i{{.+}} 3 // CK24-DAG: [[SEC00]] = getelementptr {{.*}}[[SB]]* [[SEC000:%[^,]+]], i{{.+}} 0, i{{.+}} 3 // CK24-DAG: [[SEC000]] = getelementptr {{.*}}[[SC]]* [[VAR00:%.+]], i{{.+}} 0, i{{.+}} 1 // CK24-DAG: [[BP1:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 1 // CK24-DAG: [[P1:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 1 -// CK24-DAG: [[S1:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 1 // CK24-DAG: [[CBP1:%.+]] = bitcast i8** [[BP1]] to [[SC]]** // CK24-DAG: [[CP1:%.+]] = bitcast i8** [[P1]] to [[SA]]*** // CK24-DAG: store [[SC]]* [[VAR0]], [[SC]]** [[CBP1]] // CK24-DAG: store [[SA]]** [[SEC0]], [[SA]]*** [[CP1]] -// CK24-DAG: store i64 {{.+}}, i64* [[S1]] // CK24-DAG: [[BP2:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 2 // CK24-DAG: [[P2:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 2 @@ -328,19 +324,17 @@ { p->a++; } // Region 20 -// CK24-DAG: call i32 
@__tgt_target_mapper(%struct.ident_t* @{{.+}}, i64 {{[^,]+}}, i8* {{[^,]+}}, i32 2, i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], i64* [[GEPS:%.+]], {{.+}}getelementptr {{.+}}[2 x i{{.+}}]* [[MTYPE20]]{{.+}}, i8** null) +// CK24-DAG: call i32 @__tgt_target_mapper(%struct.ident_t* @{{.+}}, i64 {{[^,]+}}, i8* {{[^,]+}}, i32 2, i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], {{.+}}getelementptr {{.+}}[2 x i{{.+}}]* [[SIZE20]]{{.+}}, {{.+}}getelementptr {{.+}}[2 x i{{.+}}]* [[MTYPE20]]{{.+}}, i8** null) // CK24-DAG: [[GEPBP]] = getelementptr inbounds {{.+}}[[BP:%[^,]+]] // CK24-DAG: [[GEPP]] = getelementptr inbounds {{.+}}[[P:%[^,]+]] -// CK24-DAG: [[GEPS]] = getelementptr inbounds {{.+}}[[S:%[^,]+]] // CK24-DAG: [[BP0:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 0 // CK24-DAG: [[P0:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 0 -// CK24-DAG: [[S0:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 0 // CK24-DAG: [[CBP0:%.+]] = bitcast i8** [[BP0]] to [[SC]]** // CK24-DAG: [[CP0:%.+]] = bitcast i8** [[P0]] to [[SB]]*** // CK24-DAG: store [[SC]]* [[VAR0:%.+]], [[SC]]** [[CBP0]] // CK24-DAG: store [[SB]]** [[SEC0:%.+]], [[SB]]*** [[CP0]] -// CK24-DAG: store i64 {{%.+}}, i64* [[S0]] +// CK24-DAG: store i64 {{%.+}}, i64* getelementptr inbounds ([2 x i64], [2 x i64]* [[SIZE20]], i32 0, i32 0), // CK24-DAG: [[SEC0]] = getelementptr {{.*}}[[SC]]* [[VAR00:%.+]], i{{.+}} 0, i{{.+}} 2 // CK24-DAG: [[BP1:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 1 @@ -362,39 +356,33 @@ { p->a++; } // Region 21 -// CK24-DAG: call i32 @__tgt_target_mapper(%struct.ident_t* @{{.+}}, i64 {{[^,]+}}, i8* {{[^,]+}}, i32 3, i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], i64* [[GEPS:%.+]], {{.+}}getelementptr {{.+}}[3 x i{{.+}}]* [[MTYPE21]]{{.+}}, i8** null) +// CK24-DAG: call i32 @__tgt_target_mapper(%struct.ident_t* @{{.+}}, i64 {{[^,]+}}, i8* {{[^,]+}}, i32 3, i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], {{.+}}getelementptr {{.+}}[3 x i{{.+}}]* [[SIZE21]]{{.+}}, {{.+}}getelementptr {{.+}}[3 x i{{.+}}]* [[MTYPE21]]{{.+}}, i8** null) // CK24-DAG: [[GEPBP]] = getelementptr inbounds {{.+}}[[BP:%[^,]+]] // CK24-DAG: [[GEPP]] = getelementptr inbounds {{.+}}[[P:%[^,]+]] -// CK24-DAG: [[GEPS]] = getelementptr inbounds {{.+}}[[S:%[^,]+]] // CK24-DAG: [[BP0:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 0 // CK24-DAG: [[P0:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 0 -// CK24-DAG: [[S0:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 0 // CK24-DAG: [[CBP0:%.+]] = bitcast i8** [[BP0]] to [[SC]]** // CK24-DAG: [[CP0:%.+]] = bitcast i8** [[P0]] to [[SA]]*** // CK24-DAG: store [[SC]]* [[VAR0:%.+]], [[SC]]** [[CBP0]] // CK24-DAG: store [[SA]]** [[SEC0:%.+]], [[SA]]*** [[CP0]] -// CK24-DAG: store i64 {{%.+}}, i64* [[S0]] +// CK24-DAG: store i64 {{%.+}}, i64* getelementptr inbounds ([3 x i64], [3 x i64]* [[SIZE21]], i32 0, i32 0), // CK24-DAG: [[SEC0]] = getelementptr {{.*}}[[SB]]* [[SEC00:[^,]+]], i{{.+}} 0, i{{.+}} 4 // CK24-DAG: [[SEC00]] = getelementptr {{.*}}[[SC]]* [[VAR00:%.+]], i{{.+}} 0, i{{.+}} 1 // CK24-DAG: [[BP1:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 1 // CK24-DAG: [[P1:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 1 -// CK24-DAG: [[S1:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 1 // CK24-DAG: [[CBP1:%.+]] = bitcast i8** [[BP1]] to [[SC]]** // CK24-DAG: [[CP1:%.+]] = bitcast i8** [[P1]] to [[SA]]*** // CK24-DAG: store [[SC]]* [[VAR0]], [[SC]]** [[CBP1]] // CK24-DAG: store [[SA]]** 
[[SEC0]], [[SA]]*** [[CP1]] -// CK24-DAG: store i64 {{.+}}, i64* [[S1]] // CK24-DAG: [[BP2:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 2 // CK24-DAG: [[P2:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 2 -// CK24-DAG: [[S2:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 2 // CK24-DAG: [[CBP2:%.+]] = bitcast i8** [[BP2]] to [[SA]]*** // CK24-DAG: [[CP2:%.+]] = bitcast i8** [[P2]] to i32** // CK24-DAG: store [[SA]]** [[SEC0]], [[SA]]*** [[CBP2]] // CK24-DAG: store i32* [[SEC1:%.+]], i32** [[CP2]] -// CK24-DAG: store i64 {{.+}}, i64* [[S2]] // CK24-DAG: [[SEC1]] = getelementptr {{.*}}[[SA]]* [[SEC11:%[^,]+]], i{{.+}} 0 // CK24-DAG: [[SEC11]] = load [[SA]]*, [[SA]]** [[SEC111:%[^,]+]], // CK24-DAG: [[SEC111]] = getelementptr {{.*}}[[SB]]* [[SEC1111:[^,]+]], i{{.+}} 0, i{{.+}} 4 @@ -409,19 +397,17 @@ { p->a++; } // Region 22 -// CK24-DAG: call i32 @__tgt_target_mapper(%struct.ident_t* @{{.+}}, i64 {{[^,]+}}, i8* {{[^,]+}}, i32 2, i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], i64* [[GEPS:%.+]], {{.+}}getelementptr {{.+}}[2 x i{{.+}}]* [[MTYPE22]]{{.+}}, i8** null) +// CK24-DAG: call i32 @__tgt_target_mapper(%struct.ident_t* @{{.+}}, i64 {{[^,]+}}, i8* {{[^,]+}}, i32 2, i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], {{.+}}getelementptr {{.+}}[2 x i{{.+}}]* [[SIZE22]]{{.+}}, {{.+}}getelementptr {{.+}}[2 x i{{.+}}]* [[MTYPE22]]{{.+}}, i8** null) // CK24-DAG: [[GEPBP]] = getelementptr inbounds {{.+}}[[BP:%[^,]+]] // CK24-DAG: [[GEPP]] = getelementptr inbounds {{.+}}[[P:%[^,]+]] -// CK24-DAG: [[GEPS]] = getelementptr inbounds {{.+}}[[S:%[^,]+]] // CK24-DAG: [[BP0:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 0 // CK24-DAG: [[P0:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 0 -// CK24-DAG: [[S0:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 0 // CK24-DAG: [[CBP0:%.+]] = bitcast i8** [[BP0]] to [[SC]]** // CK24-DAG: [[CP0:%.+]] = bitcast i8** [[P0]] to i32** // CK24-DAG: store [[SC]]* [[VAR0:%.+]], [[SC]]** [[CBP0]] // CK24-DAG: store i32* [[SEC0:%.+]], i32** [[CP0]] -// CK24-DAG: store i64 {{%.+}}, i64* [[S0]] +// CK24-DAG: store i64 {{%.+}}, i64* getelementptr inbounds ([2 x i64], [2 x i64]* [[SIZE22]], i32 0, i32 0), // CK24-DAG: [[SEC0]] = getelementptr {{.*}}[10 x i32]* [[SEC00:%[^,]+]], i{{.+}} 0, i{{.+}} 0 // CK24-DAG: [[SEC00]] = getelementptr {{.*}}[[SA]]* [[SEC000:%[^,]+]], i{{.+}} 0, i{{.+}} 2 // CK24-DAG: [[SEC000]] = getelementptr {{.*}}[[SB]]* [[SEC0000:%[^,]+]], i{{.+}} 0, i{{.+}} 1 @@ -429,12 +415,10 @@ // CK24-DAG: [[BP1:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 1 // CK24-DAG: [[P1:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 1 -// CK24-DAG: [[S1:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 1 // CK24-DAG: [[CBP1:%.+]] = bitcast i8** [[BP1]] to [[SC]]** // CK24-DAG: [[CP1:%.+]] = bitcast i8** [[P1]] to i32** // CK24-DAG: store [[SC]]* [[VAR0]], [[SC]]** [[CBP1]] // CK24-DAG: store i32* [[SEC0]], i32** [[CP1]] -// CK24-DAG: store i64 {{.+}}, i64* [[S1]] // CK24-DAG: [[VAR0]] = load [[SC]]*, [[SC]]** %{{.+}} // CK24-DAG: [[VAR00]] = load [[SC]]*, [[SC]]** %{{.+}} @@ -444,39 +428,33 @@ { p->a++; } // Region 23 -// CK24-DAG: call i32 @__tgt_target_mapper(%struct.ident_t* @{{.+}}, i64 {{[^,]+}}, i8* {{[^,]+}}, i32 3, i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], i64* [[GEPS:%.+]], {{.+}}getelementptr {{.+}}[3 x i{{.+}}]* [[MTYPE23]]{{.+}}, i8** null) +// CK24-DAG: call i32 @__tgt_target_mapper(%struct.ident_t* @{{.+}}, i64 {{[^,]+}}, i8* {{[^,]+}}, i32 3, 
i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], {{.+}}getelementptr {{.+}}[3 x i{{.+}}]* [[SIZE23]]{{.+}}, {{.+}}getelementptr {{.+}}[3 x i{{.+}}]* [[MTYPE23]]{{.+}}, i8** null) // CK24-DAG: [[GEPBP]] = getelementptr inbounds {{.+}}[[BP:%[^,]+]] // CK24-DAG: [[GEPP]] = getelementptr inbounds {{.+}}[[P:%[^,]+]] -// CK24-DAG: [[GEPS]] = getelementptr inbounds {{.+}}[[S:%[^,]+]] // CK24-DAG: [[BP0:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 0 // CK24-DAG: [[P0:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 0 -// CK24-DAG: [[S0:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 0 // CK24-DAG: [[CBP0:%.+]] = bitcast i8** [[BP0]] to [[SC]]** // CK24-DAG: [[CP0:%.+]] = bitcast i8** [[P0]] to [[SA]]*** // CK24-DAG: store [[SC]]* [[VAR0:%.+]], [[SC]]** [[CBP0]] // CK24-DAG: store [[SA]]** [[SEC0:%.+]], [[SA]]*** [[CP0]] -// CK24-DAG: store i64 {{%.+}}, i64* [[S0]] +// CK24-DAG: store i64 {{%.+}}, i64* getelementptr inbounds ([3 x i64], [3 x i64]* [[SIZE23]], i32 0, i32 0), // CK24-DAG: [[SEC0]] = getelementptr {{.*}}[[SB]]* [[SEC00:%[^,]+]], i{{.+}} 0, i{{.+}} 4 // CK24-DAG: [[SEC00]] = getelementptr {{.*}}[[SC]]* [[VAR00:%.+]], i{{.+}} 0, i{{.+}} 1 // CK24-DAG: [[BP1:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 1 // CK24-DAG: [[P1:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 1 -// CK24-DAG: [[S1:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 1 // CK24-DAG: [[CBP1:%.+]] = bitcast i8** [[BP1]] to [[SC]]** // CK24-DAG: [[CP1:%.+]] = bitcast i8** [[P1]] to [[SA]]*** // CK24-DAG: store [[SC]]* [[VAR0]], [[SC]]** [[CBP1]] // CK24-DAG: store [[SA]]** [[SEC0]], [[SA]]*** [[CP1]] -// CK24-DAG: store i64 {{.+}}, i64* [[S1]] // CK24-DAG: [[BP2:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 2 // CK24-DAG: [[P2:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 2 -// CK24-DAG: [[S2:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 2 // CK24-DAG: [[CBP2:%.+]] = bitcast i8** [[BP2]] to [[SA]]*** // CK24-DAG: [[CP2:%.+]] = bitcast i8** [[P2]] to i32** // CK24-DAG: store [[SA]]** [[SEC0]], [[SA]]*** [[CBP2]] // CK24-DAG: store i32* [[SEC1:%.+]], i32** [[CP2]] -// CK24-DAG: store i64 {{.+}}, i64* [[S2]] // CK24-DAG: [[SEC1]] = getelementptr {{.*}}[10 x i32]* [[SEC11:%[^,]+]], i{{.+}} 0, i{{.+}} 0 // CK24-DAG: [[SEC11]] = getelementptr {{.*}}[[SA]]* [[SEC111:%[^,]+]], i{{.+}} 0, i{{.+}} 2 // CK24-DAG: [[SEC111]] = load [[SA]]*, [[SA]]** [[SEC1111:%[^,]+]], @@ -492,41 +470,35 @@ { p->a++; } // Region 24 -// CK24-DAG: call i32 @__tgt_target_mapper(%struct.ident_t* @{{.+}}, i64 {{[^,]+}}, i8* {{[^,]+}}, i32 4, i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], i64* [[GEPS:%.+]], {{.+}}getelementptr {{.+}}[4 x i{{.+}}]* [[MTYPE24]]{{.+}}, i8** null) +// CK24-DAG: call i32 @__tgt_target_mapper(%struct.ident_t* @{{.+}}, i64 {{[^,]+}}, i8* {{[^,]+}}, i32 4, i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], {{.+}}getelementptr {{.+}}[4 x i{{.+}}]* [[SIZE24]]{{.+}}, {{.+}}getelementptr {{.+}}[4 x i{{.+}}]* [[MTYPE24]]{{.+}}, i8** null) // CK24-DAG: [[GEPBP]] = getelementptr inbounds {{.+}}[[BP:%[^,]+]] // CK24-DAG: [[GEPP]] = getelementptr inbounds {{.+}}[[P:%[^,]+]] -// CK24-DAG: [[GEPS]] = getelementptr inbounds {{.+}}[[S:%[^,]+]] // CK24-DAG: [[BP0:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 0 // CK24-DAG: [[P0:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 0 -// CK24-DAG: [[S0:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 0 // CK24-DAG: [[CBP0:%.+]] = bitcast 
i8** [[BP0]] to [[SC]]** // CK24-DAG: [[CP0:%.+]] = bitcast i8** [[P0]] to [[SB]]*** // CK24-DAG: store [[SC]]* [[VAR0:%.+]], [[SC]]** [[CBP0]] // CK24-DAG: store [[SB]]** [[SEC0:%.+]], [[SB]]*** [[CP0]] -// CK24-DAG: store i64 {{%.+}}, i64* [[S0]] +// CK24-DAG: store i64 {{%.+}}, i64* getelementptr inbounds ([4 x i64], [4 x i64]* [[SIZE24]], i32 0, i32 0), // CK24-DAG: [[SEC0]] = getelementptr {{.*}}[[SC]]* [[VAR00:%.+]], i{{.+}} 0, i{{.+}} 2 // CK24-DAG: [[BP1:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 1 // CK24-DAG: [[P1:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 1 -// CK24-DAG: [[S1:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 1 // CK24-DAG: [[CBP1:%.+]] = bitcast i8** [[BP1]] to [[SB]]*** // CK24-DAG: [[CP1:%.+]] = bitcast i8** [[P1]] to [[SA]]*** // CK24-DAG: store [[SB]]** [[SEC0]], [[SB]]*** [[CBP1]] // CK24-DAG: store [[SA]]** [[SEC1:%.+]], [[SA]]*** [[CP1]] -// CK24-DAG: store i64 {{.+}}, i64* [[S1]] // CK24-DAG: [[SEC1]] = getelementptr {{.*}}[[SB]]* [[SEC11:%[^,]+]], i{{.+}} 0, i{{.+}} 4 // CK24-DAG: [[SEC11]] = load [[SB]]*, [[SB]]** [[SEC111:%[^,]+]], // CK24-DAG: [[SEC111]] = getelementptr {{.*}}[[SC]]* [[VAR000:%.+]], i{{.+}} 0, i{{.+}} 2 // CK24-DAG: [[BP2:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 2 // CK24-DAG: [[P2:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 2 -// CK24-DAG: [[S2:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 2 // CK24-DAG: [[CBP2:%.+]] = bitcast i8** [[BP2]] to [[SA]]*** // CK24-DAG: [[CP2:%.+]] = bitcast i8** [[P2]] to [[SA]]*** // CK24-DAG: store [[SA]]** [[SEC1]], [[SA]]*** [[CBP2]] // CK24-DAG: store [[SA]]** [[SEC2:%.+]], [[SA]]*** [[CP2]] -// CK24-DAG: store i64 {{.+}}, i64* [[S2]] // CK24-DAG: [[SEC2]] = getelementptr {{.*}}[[SA]]* [[SEC22:%[^,]+]], i{{.+}} 0, i{{.+}} 1 // CK24-DAG: [[SEC22]] = load [[SA]]*, [[SA]]** [[SEC222:%[^,]+]], // CK24-DAG: [[SEC222]] = getelementptr {{.*}}[[SB]]* [[SEC2222:%[^,]+]], i{{.+}} 0, i{{.+}} 4 @@ -535,12 +507,10 @@ // CK24-DAG: [[BP3:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 3 // CK24-DAG: [[P3:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 3 -// CK24-DAG: [[S3:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 3 // CK24-DAG: [[CBP3:%.+]] = bitcast i8** [[BP3]] to [[SA]]*** // CK24-DAG: [[CP3:%.+]] = bitcast i8** [[P3]] to i32** // CK24-DAG: store [[SA]]** [[SEC2]], [[SA]]*** [[CBP3]] // CK24-DAG: store i32* [[SEC3:%.+]], i32** [[CP3]] -// CK24-DAG: store i64 {{.+}}, i64* [[S3]] // CK24-DAG: [[SEC3]] = getelementptr {{.*}}[[SA]]* [[SEC33:%[^,]+]], i{{.+}} 0, i{{.+}} 0 // CK24-DAG: [[SEC33]] = load [[SA]]*, [[SA]]** [[SEC333:%[^,]+]], // CK24-DAG: [[SEC333]] = getelementptr {{.*}}[[SA]]* [[SEC3333:%[^,]+]], i{{.+}} 0, i{{.+}} 1 diff --git a/clang/test/OpenMP/target_map_codegen_28.cpp b/clang/test/OpenMP/target_map_codegen_28.cpp --- a/clang/test/OpenMP/target_map_codegen_28.cpp +++ b/clang/test/OpenMP/target_map_codegen_28.cpp @@ -37,12 +37,15 @@ // CK29: [[SSB:%.+]] = type { [[SSA]]*, [[SSA]]** } // CK29-LABEL: @.__omp_offloading_{{.*}}foo{{.*}}_l{{[0-9]+}}.region_id = weak constant i8 0 +// CK29: [[SIZE00:@.+]] = private {{.*}}global [2 x i64] [i64 0, i64 80] // CK29: [[MTYPE00:@.+]] = private {{.*}}constant [2 x i64] [i64 32, i64 281474976710675] // CK29-LABEL: @.__omp_offloading_{{.*}}foo{{.*}}_l{{[0-9]+}}.region_id = weak constant i8 0 +// CK29: [[SIZE01:@.+]] = private {{.*}}global [3 x i64] [i64 0, i64 {{8|4}}, i64 80] // CK29: [[MTYPE01:@.+]] 
= private {{.*}}constant [3 x i64] [i64 32, i64 281474976710672, i64 19] // CK29-LABEL: @.__omp_offloading_{{.*}}foo{{.*}}_l{{[0-9]+}}.region_id = weak constant i8 0 +// CK29: [[SIZE02:@.+]] = private {{.*}}global [2 x i64] [i64 0, i64 80] // CK29: [[MTYPE02:@.+]] = private {{.*}}constant [2 x i64] [i64 32, i64 281474976710675] struct SSA{ @@ -60,31 +63,27 @@ void foo() { // Region 00 - // CK29-DAG: call i32 @__tgt_target_mapper(%struct.ident_t* @{{.+}}, i64 {{[^,]+}}, i8* {{[^,]+}}, i32 2, i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], i[[Z:64|32]]* [[GEPS:%.+]], {{.+}}getelementptr {{.+}}[2 x i{{.+}}]* [[MTYPE00]]{{.+}}, i8** null, i8** null) + // CK29-DAG: call i32 @__tgt_target_mapper(%struct.ident_t* @{{.+}}, i64 {{[^,]+}}, i8* {{[^,]+}}, i32 2, i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], {{.+}}getelementptr {{.+}}[2 x i{{.+}}]* [[SIZE00]]{{.+}}, {{.+}}getelementptr {{.+}}[2 x i{{.+}}]* [[MTYPE00]]{{.+}}, i8** null, i8** null) // CK29-DAG: [[GEPBP]] = getelementptr inbounds {{.+}}[[BP:%[^,]+]] // CK29-DAG: [[GEPP]] = getelementptr inbounds {{.+}}[[P:%[^,]+]] - // CK29-DAG: [[GEPS]] = getelementptr inbounds {{.+}}[[S:%[^,]+]] // CK29-DAG: [[BP0:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 0 // CK29-DAG: [[P0:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 0 - // CK29-DAG: [[S0:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 0 // CK29-DAG: [[CBP0:%.+]] = bitcast i8** [[BP0]] to [[SSB]]** // CK29-DAG: [[CP0:%.+]] = bitcast i8** [[P0]] to [[SSA]]*** // CK29-DAG: store [[SSB]]* [[VAR0:%.+]], [[SSB]]** [[CBP0]] // CK29-DAG: store [[SSA]]** [[VAR00:%.+]], [[SSA]]*** [[CP0]] - // CK29-DAG: store i64 %{{.+}}, i64* [[S0]] + // CK29-DAG: store i64 %{{.+}}, i64* getelementptr inbounds ([2 x i64], [2 x i64]* [[SIZE00]], i32 0, i32 0), // CK29-DAG: [[VAR0]] = load [[SSB]]*, [[SSB]]** % // CK29-DAG: [[VAR00]] = getelementptr inbounds [[SSB]], [[SSB]]* [[VAR0]], i32 0, i32 0 // CK29-DAG: [[BP2:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 1 // CK29-DAG: [[P2:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 1 - // CK29-DAG: [[S2:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 1 // CK29-DAG: [[CBP2:%.+]] = bitcast i8** [[BP2]] to double**** // CK29-DAG: [[CP2:%.+]] = bitcast i8** [[P2]] to double** // CK29-DAG: store double*** [[VAR1:%.+]], double**** [[CBP2]] // CK29-DAG: store double* [[VAR2:%.+]], double** [[CP2]] - // CK29-DAG: store i64 80, i64* [[S2]] // CK29-DAG: [[VAR1]] = getelementptr inbounds [[SSA]], [[SSA]]* %{{.+}}, i32 0, i32 1 // CK29-DAG: [[VAR2]] = getelementptr inbounds double, double* [[VAR22:%.+]], i{{.+}} 0 // CK29-DAG: [[VAR22]] = load double*, double** %{{.+}}, @@ -96,40 +95,34 @@ } // Region 01 - // CK29-DAG: call i32 @__tgt_target_mapper(%struct.ident_t* @{{.+}}, i64 {{[^,]+}}, i8* {{[^,]+}}, i32 3, i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], i64* [[GEPS:%.+]], {{.+}}getelementptr {{.+}}[3 x i{{.+}}]* [[MTYPE01]]{{.+}}, i8** null, i8** null) + // CK29-DAG: call i32 @__tgt_target_mapper(%struct.ident_t* @{{.+}}, i64 {{[^,]+}}, i8* {{[^,]+}}, i32 3, i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], {{.+}}getelementptr {{.+}}[3 x i{{.+}}]* [[SIZE01]]{{.+}}, {{.+}}getelementptr {{.+}}[3 x i{{.+}}]* [[MTYPE01]]{{.+}}, i8** null, i8** null) // CK29-DAG: [[GEPBP]] = getelementptr inbounds {{.+}}[[BP:%[^,]+]] // CK29-DAG: [[GEPP]] = getelementptr inbounds {{.+}}[[P:%[^,]+]] - // CK29-DAG: [[GEPS]] = getelementptr inbounds {{.+}}[[S:%[^,]+]] // CK29-DAG: [[BP0:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, 
i{{.+}} 0 // CK29-DAG: [[P0:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 0 - // CK29-DAG: [[S0:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 0 // CK29-DAG: [[CBP0:%.+]] = bitcast i8** [[BP0]] to [[SSB]]** // CK29-DAG: [[CP0:%.+]] = bitcast i8** [[P0]] to [[SSA]]**** // CK29-DAG: store [[SSB]]* [[VAR0]], [[SSB]]** [[CBP0]] // CK29-DAG: store [[SSA]]*** [[VAR000:%.+]], [[SSA]]**** [[CP0]] - // CK29-DAG: store i64 %{{.+}}, i64* [[S0]] + // CK29-DAG: store i64 %{{.+}}, i64* getelementptr inbounds ([3 x i64], [3 x i64]* [[SIZE01]], i32 0, i32 0), // CK29-DAG: [[VAR000]] = getelementptr inbounds [[SSB]], [[SSB]]* [[VAR0]], i32 0, i32 1 // CK29-DAG: [[BP1:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 1 // CK29-DAG: [[P1:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 1 - // CK29-DAG: [[S1:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 1 // CK29-DAG: [[CBP1:%.+]] = bitcast i8** [[BP1]] to [[SSA]]**** // CK29-DAG: [[CP1:%.+]] = bitcast i8** [[P1]] to double*** // CK29-DAG: store [[SSA]]*** [[VAR000]], [[SSA]]**** [[CBP1]] // CK29-DAG: store double** [[VAR1:%.+]], double*** [[CP1]] - // CK29-DAG: store i64 {{8|4}}, i64* [[S1]] // CK29-DAG: [[VAR1]] = getelementptr inbounds [[SSA]], [[SSA]]* %{{.+}}, i32 0, i32 0 // CK29-DAG: [[BP2:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 2 // CK29-DAG: [[P2:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 2 - // CK29-DAG: [[S2:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 2 // CK29-DAG: [[CBP2:%.+]] = bitcast i8** [[BP2]] to double*** // CK29-DAG: [[CP2:%.+]] = bitcast i8** [[P2]] to double** // CK29-DAG: store double** [[VAR1]], double*** [[CBP2]] // CK29-DAG: store double* [[VAR2:%.+]], double** [[CP2]] - // CK29-DAG: store i64 80, i64* [[S2]] // CK29-DAG: [[VAR2]] = getelementptr inbounds double, double* [[VAR22:%.+]], i{{.+}} 0 // CK29-DAG: [[VAR22]] = load double*, double** %{{.+}}, @@ -140,30 +133,26 @@ } // Region 02 - // CK29-DAG: call i32 @__tgt_target_mapper(%struct.ident_t* @{{.+}}, i64 {{[^,]+}}, i8* {{[^,]+}}, i32 2, i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], i64* [[GEPS:%.+]], {{.+}}getelementptr {{.+}}[2 x i{{.+}}]* [[MTYPE02]]{{.+}}, i8** null, i8** null) + // CK29-DAG: call i32 @__tgt_target_mapper(%struct.ident_t* @{{.+}}, i64 {{[^,]+}}, i8* {{[^,]+}}, i32 2, i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], {{.+}}getelementptr {{.+}}[2 x i{{.+}}]* [[SIZE02]]{{.+}}, {{.+}}getelementptr {{.+}}[2 x i{{.+}}]* [[MTYPE02]]{{.+}}, i8** null, i8** null) // CK29-DAG: [[GEPBP]] = getelementptr inbounds {{.+}}[[BP:%[^,]+]] // CK29-DAG: [[GEPP]] = getelementptr inbounds {{.+}}[[P:%[^,]+]] - // CK29-DAG: [[GEPS]] = getelementptr inbounds {{.+}}[[S:%[^,]+]] // CK29-DAG: [[BP0:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 0 // CK29-DAG: [[P0:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 0 - // CK29-DAG: [[S0:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 0 // CK29-DAG: [[CBP0:%.+]] = bitcast i8** [[BP0]] to [[SSB]]** // CK29-DAG: [[CP0:%.+]] = bitcast i8** [[P0]] to [[SSA]]**** // CK29-DAG: store [[SSB]]* [[VAR0]], [[SSB]]** [[CBP0]] // CK29-DAG: store [[SSA]]*** [[VAR000:%.+]], [[SSA]]**** [[CP0]] - // CK29-DAG: store i64 %{{.+}}, i64* [[S0]] + // CK29-DAG: store i64 %{{.+}}, i64* getelementptr inbounds ([2 x i64], [2 x i64]* [[SIZE02]], i32 0, i32 0), // CK29-DAG: [[VAR000]] = getelementptr inbounds [[SSB]], [[SSB]]* [[VAR0]], i32 0, i32 1 // CK29-DAG: [[BP2:%.+]] = getelementptr inbounds 
{{.+}}[[BP]], i{{.+}} 0, i{{.+}} 1 // CK29-DAG: [[P2:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 1 - // CK29-DAG: [[S2:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 1 // CK29-DAG: [[CBP2:%.+]] = bitcast i8** [[BP2]] to double**** // CK29-DAG: [[CP2:%.+]] = bitcast i8** [[P2]] to double** // CK29-DAG: store double*** [[VAR1:%.+]], double**** [[CBP2]] // CK29-DAG: store double* [[VAR2:%.+]], double** [[CP2]] - // CK29-DAG: store i64 80, i64* [[S2]] // CK29-DAG: [[VAR1]] = getelementptr inbounds [[SSA]], [[SSA]]* %{{.+}}, i32 0, i32 1 // CK29-DAG: [[VAR2]] = getelementptr inbounds double, double* [[VAR22:%.+]], i{{.+}} 0 // CK29-DAG: [[VAR22]] = load double*, double** %{{.+}}, diff --git a/clang/test/OpenMP/target_map_codegen_29.cpp b/clang/test/OpenMP/target_map_codegen_29.cpp --- a/clang/test/OpenMP/target_map_codegen_29.cpp +++ b/clang/test/OpenMP/target_map_codegen_29.cpp @@ -37,6 +37,7 @@ // CK30-DAG: [[STRUCT:%.+]] = type { [[BASE]], i32*, i32*, i32, i32* } // CK30-LABEL: @.__omp_offloading_{{.*}}map_with_deep_copy{{.*}}_l{{[0-9]+}}.region_id = weak constant i8 0 +// CK30: [[SIZE00:@.+]] = private unnamed_addr global [4 x i64] [i64 0, i64 {{56|28}}, i64 4, i64 4] // The first element: 0x20 - OMP_MAP_TARGET_PARAM // 2: 0x1000000000003 - OMP_MAP_MEMBER_OF(0) | OMP_MAP_TO | OMP_MAP_FROM - copies all the data in structs excluding deep-copied elements (from &s to end of s). // 3-4: 0x1000000000013 - OMP_MAP_MEMBER_OF(0) | OMP_MAP_PTR_AND_OBJ | OMP_MAP_TO | OMP_MAP_FROM - deep copy of the pointers + pointee. @@ -55,8 +56,7 @@ int *ptr1; } StructWithPtr; -// CK30-DAG: call i32 @__tgt_target_mapper(%struct.ident_t* @{{.+}}, i64 -1, i8* @.__omp_offloading_{{.*}}map_with_deep_copy{{.*}}_l{{[0-9]+}}.region_id, i32 4, i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], i64* [[GEPS:%.+]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* [[MTYPE00]], i32 0, i32 0), i8** null, i8** null) -// CK30-DAG: [[GEPS]] = getelementptr inbounds [4 x i{{64|32}}], [4 x i64]* [[SIZES:%.+]], i32 0, i32 0 +// CK30-DAG: call i32 @__tgt_target_mapper(%struct.ident_t* @{{.+}}, i64 -1, i8* @.__omp_offloading_{{.*}}map_with_deep_copy{{.*}}_l{{[0-9]+}}.region_id, i32 4, i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* [[SIZE00]], i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* [[MTYPE00]], i32 0, i32 0), i8** null, i8** null) // CK30-DAG: [[GEPP]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[PTRS:%.+]], i32 0, i32 0 // CK30-DAG: [[GEPBP]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[BASES:%.+]], i32 0, i32 0 @@ -66,8 +66,7 @@ // CK30-DAG: [[PTR:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[PTRS]], i32 0, i32 0 // CK30-DAG: [[BC:%.+]] = bitcast i8** [[PTR]] to [[STRUCT]]** // CK30-DAG: store [[STRUCT]]* [[S]], [[STRUCT]]** [[BC]], -// CK30-DAG: [[SIZE:%.+]] = getelementptr inbounds [4 x i{{64|32}}], [4 x i64]* [[SIZES]], i32 0, i32 0 -// CK30-DAG: store i64 [[S_ALLOC_SIZE:%.+]], i64* [[SIZE]], +// CK30-DAG: store i64 [[S_ALLOC_SIZE:%.+]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* [[SIZE00]], i32 0, i32 0), // CK30-DAG: [[S_ALLOC_SIZE]] = sdiv exact i64 [[DIFF:%.+]], ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64) // CK30-DAG: [[DIFF]] = sub i64 [[S_END_BC:%.+]], [[S_BEGIN_BC:%.+]] // CK30-DAG: [[S_BEGIN_BC]] = ptrtoint i8* [[S_BEGIN:%.+]] to i64 @@ -82,8 +81,6 @@ // CK30-DAG: [[PTR:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[PTRS]], i32 0, i32 1 // CK30-DAG: [[BC:%.+]] = bitcast i8** [[PTR]] to 
[[STRUCT]]** // CK30-DAG: store [[STRUCT]]* [[S]], [[STRUCT]]** [[BC]], -// CK30-DAG: [[SIZE:%.+]] = getelementptr inbounds [4 x i64], [4 x i64]* [[SIZES]], i32 0, i32 1 -// CK30-DAG: store i64 {{56|28}}, i64* [[SIZE]], // CK30-DAG: [[BASE_PTR:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[BASES]], i32 0, i32 2 // CK30-DAG: [[BC:%.+]] = bitcast i8** [[BASE_PTR]] to i32*** @@ -91,8 +88,6 @@ // CK30-DAG: [[PTR:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[PTRS]], i32 0, i32 2 // CK30-DAG: [[BC:%.+]] = bitcast i8** [[PTR]] to i32** // CK30-DAG: store i32* [[S_PTR1_BEGIN:%.+]], i32** [[BC]], -// CK30-DAG: [[SIZE:%.+]] = getelementptr inbounds [4 x i64], [4 x i64]* [[SIZES]], i32 0, i32 2 -// CK30-DAG: store i64 4, i64* [[SIZE]], // CK30-DAG: [[S_PTR1]] = getelementptr inbounds [[STRUCT]], [[STRUCT]]* [[S]], i32 0, i32 4 // CK30-DAG: [[S_PTR1_BEGIN]] = getelementptr inbounds i32, i32* [[S_PTR1_BEGIN_REF:%.+]], i{{64|32}} 0 // CK30-DAG: [[S_PTR1_BEGIN_REF]] = load i32*, i32** [[S_PTR1:%.+]], @@ -104,8 +99,6 @@ // CK30-DAG: [[PTR:%.+]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[PTRS]], i32 0, i32 3 // CK30-DAG: [[BC:%.+]] = bitcast i8** [[PTR]] to i32** // CK30-DAG: store i32* [[S_PTRBASE1_BEGIN:%.+]], i32** [[BC]], -// CK30-DAG: [[SIZE:%.+]] = getelementptr inbounds [4 x i{{64|32}}], [4 x i{{64|32}}]* [[SIZES]], i32 0, i32 3 -// CK30-DAG: store i{{64|32}} 4, i{{64|32}}* [[SIZE]], // CK30-DAG: [[S_PTRBASE1]] = getelementptr inbounds [[BASE]], [[BASE]]* [[S_BASE:%.+]], i32 0, i32 2 // CK30-DAG: [[S_BASE]] = bitcast [[STRUCT]]* [[S]] to [[BASE]]* // CK30-DAG: [[S_PTRBASE1_BEGIN]] = getelementptr inbounds i32, i32* [[S_PTRBASE1_BEGIN_REF:%.+]], i{{64|32}} 0 diff --git a/clang/test/OpenMP/target_map_codegen_31.cpp b/clang/test/OpenMP/target_map_codegen_31.cpp --- a/clang/test/OpenMP/target_map_codegen_31.cpp +++ b/clang/test/OpenMP/target_map_codegen_31.cpp @@ -41,6 +41,7 @@ // CK31A-LABEL: @.__omp_offloading_{{.*}}explicit_maps_single{{.*}}_l{{[0-9]+}}.region_id = weak constant i8 0 // +// CK31A: [[SIZE00:@.+]] = private {{.*}}global [7 x i64] [i64 0, i64 4, i64 4, i64 4, i64 0, i64 4, i64 4] // PRESENT=0x1000 | TARGET_PARAM=0x20 = 0x1020 // CK31A-USE: [[MTYPE00:@.+]] = private {{.*}}constant [7 x i64] [i64 [[#0x1020]], // CK31A-NOUSE: [[MTYPE00:@.+]] = private {{.*}}constant [7 x i64] [i64 [[#0x1000]], @@ -84,80 +85,67 @@ // CK31A: [[ST1_I:%.+]] = getelementptr inbounds [[ST]], [[ST]]* [[ST1]], i{{.+}} 0, i{{.+}} 0 // CK31A: [[ST2_I:%.+]] = getelementptr inbounds [[ST]], [[ST]]* [[ST2]], i{{.+}} 0, i{{.+}} 0 // CK31A: [[ST2_J:%.+]] = getelementptr inbounds [[ST]], [[ST]]* [[ST2]], i{{.+}} 0, i{{.+}} 1 -// CK31A-DAG: call i32 @__tgt_target_mapper(%struct.ident_t* @{{.+}}, i64 {{[^,]+}}, i8* {{[^,]+}}, i32 7, i8** [[GEPBP:%[0-9]+]], i8** [[GEPP:%[0-9]+]], i64* [[GEPS:%.+]], i64* getelementptr {{.+}}[7 x i{{.+}}]* [[MTYPE00]]{{.+}}) -// CK31A-DAG: [[GEPS]] = getelementptr inbounds [7 x i64], [7 x i64]* [[S:%.+]], i{{.+}} 0, i{{.+}} 0 +// CK31A-DAG: call i32 @__tgt_target_mapper(%struct.ident_t* @{{.+}}, i64 {{[^,]+}}, i8* {{[^,]+}}, i32 7, i8** [[GEPBP:%[0-9]+]], i8** [[GEPP:%[0-9]+]], i64* getelementptr {{.+}}[7 x i{{.+}}]* [[SIZE00]]{{.+}}, i64* getelementptr {{.+}}[7 x i{{.+}}]* [[MTYPE00]]{{.+}}) // CK31A-DAG: [[GEPBP]] = getelementptr inbounds {{.+}}[[BP:%[^,]+]] // CK31A-DAG: [[GEPP]] = getelementptr inbounds {{.+}}[[P:%[^,]+]] // st1 // CK31A-DAG: [[BP0:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 0 // CK31A-DAG: [[P0:%.+]] = getelementptr inbounds 
{{.+}}[[P]], i{{.+}} 0, i{{.+}} 0 -// CK31A-DAG: [[S0:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 0 // CK31A-DAG: [[CBP0:%.+]] = bitcast i8** [[BP0]] to [[ST]]** // CK31A-DAG: [[CP0:%.+]] = bitcast i8** [[P0]] to i32** // CK31A-DAG: store [[ST]]* [[ST1]], [[ST]]** [[CBP0]] // CK31A-DAG: store i32* [[ST1_I]], i32** [[CP0]] -// CK31A-DAG: store i64 %{{.+}}, i64* [[S0]] +// CK31A-DAG: store i64 %{{.+}}, i64* getelementptr inbounds ([7 x i64], [7 x i64]* [[SIZE00]], i32 0, i32 0), // st1.j // CK31A-DAG: [[BP1:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 1 // CK31A-DAG: [[P1:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 1 -// CK31A-DAG: [[S1:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 1 // CK31A-DAG: [[CBP1:%.+]] = bitcast i8** [[BP1]] to [[ST]]** // CK31A-DAG: [[CP1:%.+]] = bitcast i8** [[P1]] to i32** // CK31A-DAG: store [[ST]]* [[ST1]], [[ST]]** [[CBP1]] // CK31A-DAG: store i32* [[ST1_J]], i32** [[CP1]] -// CK31A-DAG: store i64 4, i64* [[S1]] // st1.i // CK31A-DAG: [[BP2:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 2 // CK31A-DAG: [[P2:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 2 -// CK31A-DAG: [[S2:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 2 // CK31A-DAG: [[CBP2:%.+]] = bitcast i8** [[BP2]] to [[ST]]** // CK31A-DAG: [[CP2:%.+]] = bitcast i8** [[P2]] to i32** // CK31A-DAG: store [[ST]]* [[ST1]], [[ST]]** [[CBP2]] // CK31A-DAG: store i32* [[ST1_I]], i32** [[CP2]] -// CK31A-DAG: store i64 4, i64* [[S2]] // a // CK31A-DAG: [[BP3:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 3 // CK31A-DAG: [[P3:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 3 -// CK31A-DAG: [[S3:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 3 // CK31A-DAG: [[CBP3:%.+]] = bitcast i8** [[BP3]] to i32** // CK31A-DAG: [[CP3:%.+]] = bitcast i8** [[P3]] to i32** // CK31A-DAG: store i32* [[A]], i32** [[CBP3]] // CK31A-DAG: store i32* [[A]], i32** [[CP3]] -// CK31A-DAG: store i64 4, i64* [[S3]] // st2 // CK31A-DAG: [[BP4:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 4 // CK31A-DAG: [[P4:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 4 -// CK31A-DAG: [[S4:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 4 // CK31A-DAG: [[CBP4:%.+]] = bitcast i8** [[BP4]] to [[ST]]** // CK31A-DAG: [[CP4:%.+]] = bitcast i8** [[P4]] to i32** // CK31A-DAG: store [[ST]]* [[ST2]], [[ST]]** [[CBP4]] // CK31A-DAG: store i32* [[ST2_I]], i32** [[CP4]] -// CK31A-DAG: store i64 %{{.+}}, i64* [[S4]] +// CK31A-DAG: store i64 %{{.+}}, i64* getelementptr inbounds ([7 x i64], [7 x i64]* [[SIZE00]], i32 0, i32 4), // st2.i // CK31A-DAG: [[BP5:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 5 // CK31A-DAG: [[P5:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 5 -// CK31A-DAG: [[S5:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 5 // CK31A-DAG: [[CBP5:%.+]] = bitcast i8** [[BP5]] to [[ST]]** // CK31A-DAG: [[CP5:%.+]] = bitcast i8** [[P5]] to i32** // CK31A-DAG: store [[ST]]* [[ST2]], [[ST]]** [[CBP5]] // CK31A-DAG: store i32* [[ST2_I]], i32** [[CP5]] -// CK31A-DAG: store i64 4, i64* [[S5]] // st2.j // CK31A-DAG: [[BP6:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 6 // CK31A-DAG: [[P6:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 6 -// CK31A-DAG: [[S6:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 6 // CK31A-DAG: [[CBP6:%.+]] = bitcast i8** [[BP6]] to 
[[ST]]** // CK31A-DAG: [[CP6:%.+]] = bitcast i8** [[P6]] to i32** // CK31A-DAG: store [[ST]]* [[ST2]], [[ST]]** [[CBP6]] // CK31A-DAG: store i32* [[ST2_J]], i32** [[CP6]] -// CK31A-DAG: store i64 4, i64* [[S6]] // CK31A-USE: call void [[CALL00:@.+]]([[ST]]* [[ST1]], i32* [[A]], [[ST]]* [[ST2]]) // CK31A-NOUSE: call void [[CALL00:@.+]]() diff --git a/clang/test/OpenMP/target_map_codegen_32.cpp b/clang/test/OpenMP/target_map_codegen_32.cpp --- a/clang/test/OpenMP/target_map_codegen_32.cpp +++ b/clang/test/OpenMP/target_map_codegen_32.cpp @@ -40,6 +40,7 @@ // MEMBER_OF_1=0x1000000000000 | FROM=0x2 | TO=0x1 = 0x1000000000003 // CK31B-LABEL: @.__omp_offloading_{{.*}}test_present_members{{.*}}_l{{[0-9]+}}.region_id = weak constant i8 0 +// CK31B: [[SIZE00:@.+]] = private {{.*}}global [3 x i64] [i64 0, i64 4, i64 4] // CK31B-USE: [[MTYPE00:@.+]] = private {{.*}}constant [3 x i64] [i64 [[#0x1020]], // CK31B-NOUSE: [[MTYPE00:@.+]] = private {{.*}}constant [3 x i64] [i64 [[#0x1000]], // CK31B-USE-SAME: {{^}} i64 [[#0x1000000001003]], i64 [[#0x1000000000003]]] @@ -55,40 +56,34 @@ // Region 00 // CK31B: [[J:%.+]] = getelementptr inbounds [[ST]], [[ST]]* [[THIS:%.+]], i{{.+}} 0, i{{.+}} 1 // CK31B: [[I:%.+]] = getelementptr inbounds [[ST]], [[ST]]* [[THIS]], i{{.+}} 0, i{{.+}} 0 -// CK31B-DAG: call i32 @__tgt_target_mapper(%struct.ident_t* @{{.+}}, i64 {{[^,]+}}, i8* {{[^,]+}}, i32 3, i8** [[GEPBP:%[0-9]+]], i8** [[GEPP:%[0-9]+]], i64* [[GEPS:%.+]], i64* getelementptr {{.+}}[3 x i{{.+}}]* [[MTYPE00]]{{.+}}) -// CK31B-DAG: [[GEPS]] = getelementptr inbounds [3 x i64], [3 x i64]* [[S:%.+]], i{{.+}} 0, i{{.+}} 0 +// CK31B-DAG: call i32 @__tgt_target_mapper(%struct.ident_t* @{{.+}}, i64 {{[^,]+}}, i8* {{[^,]+}}, i32 3, i8** [[GEPBP:%[0-9]+]], i8** [[GEPP:%[0-9]+]], i64* getelementptr {{.+}}[3 x i{{.+}}]* [[SIZE00]]{{.+}}, i64* getelementptr {{.+}}[3 x i{{.+}}]* [[MTYPE00]]{{.+}}) // CK31B-DAG: [[GEPBP]] = getelementptr inbounds {{.+}}[[BP:%[^,]+]] // CK31B-DAG: [[GEPP]] = getelementptr inbounds {{.+}}[[P:%[^,]+]] // this // CK31B-DAG: [[BP0:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 0 // CK31B-DAG: [[P0:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 0 -// CK31B-DAG: [[S0:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 0 // CK31B-DAG: [[CBP0:%.+]] = bitcast i8** [[BP0]] to [[ST]]** // CK31B-DAG: [[CP0:%.+]] = bitcast i8** [[P0]] to i32** // CK31B-DAG: store [[ST]]* [[THIS]], [[ST]]** [[CBP0]] // CK31B-DAG: store i32* [[I]], i32** [[CP0]] -// CK31B-DAG: store i64 %{{.+}}, i64* [[S0]] +// CK31B-DAG: store i64 %{{.+}}, i64* getelementptr inbounds ([3 x i64], [3 x i64]* [[SIZE00]], i32 0, i32 0), // j // CK31B-DAG: [[BP1:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 1 // CK31B-DAG: [[P1:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 1 -// CK31B-DAG: [[S1:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 1 // CK31B-DAG: [[CBP1:%.+]] = bitcast i8** [[BP1]] to [[ST]]** // CK31B-DAG: [[CP1:%.+]] = bitcast i8** [[P1]] to i32** // CK31B-DAG: store [[ST]]* [[THIS]], [[ST]]** [[CBP1]] // CK31B-DAG: store i32* [[J]], i32** [[CP1]] -// CK31B-DAG: store i64 4, i64* [[S1]] // i // CK31B-DAG: [[BP2:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 2 // CK31B-DAG: [[P2:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 2 -// CK31B-DAG: [[S2:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 2 // CK31B-DAG: [[CBP2:%.+]] = bitcast i8** [[BP2]] to [[ST]]** // CK31B-DAG: [[CP2:%.+]] = bitcast 
i8** [[P2]] to i32** // CK31B-DAG: store [[ST]]* [[THIS]], [[ST]]** [[CBP2]] // CK31B-DAG: store i32* [[I]], i32** [[CP2]] -// CK31B-DAG: store i64 4, i64* [[S2]] // CK31B-USE: call void [[CALL00:@.+]]([[ST]]* [[THIS]]) // CK31B-NOUSE: call void [[CALL00:@.+]]() diff --git a/clang/test/OpenMP/target_map_codegen_34.cpp b/clang/test/OpenMP/target_map_codegen_34.cpp --- a/clang/test/OpenMP/target_map_codegen_34.cpp +++ b/clang/test/OpenMP/target_map_codegen_34.cpp @@ -35,10 +35,12 @@ void foo(); }; +// CK34-DAG: [[SIZE_TO:@.+]] = private {{.*}}global [4 x i64] [i64 0, i64 0, i64 0, i64 {{16|8}}] // TARGET_PARAM = 0x20 // MEMBER_OF_1 | TO = 0x1000000000001 // MEMBER_OF_1 | IMPLICIT | TO = 0x1000000000201 // CK34-DAG: [[MTYPE_TO:@.+]] = {{.+}}constant [4 x i64] [i64 [[#0x20]], i64 [[#0x1000000000001]], i64 [[#0x1000000000001]], i64 [[#0x1000000000201]]] +// CK34-DAG: [[SIZE_FROM:@.+]] = private {{.*}}global [4 x i64] [i64 0, i64 0, i64 0, i64 {{16|8}}] // TARGET_PARAM = 0x20 // MEMBER_OF_1 | FROM = 0x1000000000002 // MEMBER_OF_1 | IMPLICIT | FROM = 0x1000000000202 @@ -47,17 +49,15 @@ void default_mapper() { S s; - // CK34-DAG: call i32 @__tgt_target_mapper(%struct.ident_t* @{{.+}}, i64 -1, i8* @{{.+}}, i32 4, i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], i64* [[GEPS:%.+]], {{.+}}getelementptr {{.+}}[4 x i{{.+}}]* [[MTYPE_TO]]{{.+}}, i8** null, i8** [[GEPMF:%.+]]) + // CK34-DAG: call i32 @__tgt_target_mapper(%struct.ident_t* @{{.+}}, i64 -1, i8* @{{.+}}, i32 4, i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], {{.+}}getelementptr {{.+}}[4 x i{{.+}}]* [[SIZE_TO]]{{.+}}, {{.+}}getelementptr {{.+}}[4 x i{{.+}}]* [[MTYPE_TO]]{{.+}}, i8** null, i8** [[GEPMF:%.+]]) // CK34-DAG: [[GEPBP]] = getelementptr inbounds {{.+}}[[BP:%[^,]+]] // CK34-DAG: [[GEPP]] = getelementptr inbounds {{.+}}[[P:%[^,]+]] - // CK34-DAG: [[GEPS]] = getelementptr inbounds {{.+}}[[S:%[^,]+]] // CK34-DAG: [[GEPMF]] = bitcast [4 x i8*]* [[MF:%.+]] to i8** // pass TARGET_PARAM {&s, &s, ((void*)(&s+1)-(void*)&s)} // CK34-DAG: [[BP0:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 0 // CK34-DAG: [[P0:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 0 - // CK34-DAG: [[S0:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 0 // CK34-DAG: [[MF0:%.+]] = getelementptr inbounds {{.+}}[[MF]], i{{.+}} 0, i{{.+}} 0 // CK34-DAG: [[BPC0:%.+]] = bitcast i8** [[BP0]] to %class.S** @@ -65,7 +65,7 @@ // CK34-DAG: store %class.S* [[S_ADDR:%.+]], %class.S** [[BPC0]], // CK34-DAG: store %class.S* [[S_ADDR]], %class.S** [[PC0]], - // CK34-DAG: store i64 [[S_SIZE:%.+]], i64* [[S0]], + // CK34-DAG: store i64 [[S_SIZE:%.+]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* [[SIZE_TO]], i32 0, i32 0), // CK34-DAG: store i8* null, i8** [[MF0]], // CK34-DAG: [[S_SIZE]] = sdiv exact i64 [[SZ:%.+]], ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64) @@ -80,7 +80,6 @@ // CK34-DAG: [[BP1:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 1 // CK34-DAG: [[P1:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 1 - // CK34-DAG: [[S1:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 1 // CK34-DAG: [[MF1:%.+]] = getelementptr inbounds {{.+}}[[MF]], i{{.+}} 0, i{{.+}} 1 // CK34-DAG: [[BPC1:%.+]] = bitcast i8** [[BP1]] to %class.S** @@ -88,7 +87,7 @@ // CK34-DAG: store %class.S* [[S_ADDR]], %class.S** [[BPC1]], // CK34-DAG: store %class.S* [[S_ADDR]], %class.S** [[PC1]], - // CK34-DAG: store i64 [[A_SIZE:%.+]], i64* [[S1]], + // CK34-DAG: store i64 [[A_SIZE:%.+]], i64* getelementptr inbounds ([4 x i64], 
[4 x i64]* [[SIZE_TO]], i32 0, i32 1), // CK34-DAG: store i8* null, i8** [[MF1]], // CK34-DAG: [[A_SIZE]] = sdiv exact i64 [[SZ:%.+]], ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64) @@ -104,7 +103,6 @@ // CK34-DAG: [[BP2:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 2 // CK34-DAG: [[P2:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 2 - // CK34-DAG: [[S2:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 2 // CK34-DAG: [[MF2:%.+]] = getelementptr inbounds {{.+}}[[MF]], i{{.+}} 0, i{{.+}} 2 // CK34-DAG: [[BPC2:%.+]] = bitcast i8** [[BP2]] to %class.S** @@ -112,7 +110,7 @@ // CK34-DAG: store %class.S* [[S_ADDR]], %class.S** [[BPC2]], // CK34-DAG: store %class.C* [[C_END:%.+]], %class.C** [[PC2]], - // CK34-DAG: store i64 [[B_SIZE:%.+]], i64* [[S2]], + // CK34-DAG: store i64 [[B_SIZE:%.+]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* [[SIZE_TO]], i32 0, i32 2), // CK34-DAG: store i8* null, i8** [[MF2]], // CK34-DAG: [[C_END]] = getelementptr %class.C, %class.C* [[C_ADDR]], i{{.+}} 1 @@ -131,7 +129,6 @@ // CK34-DAG: [[BP3:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 3 // CK34-DAG: [[P3:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 3 - // CK34-DAG: [[S3:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 3 // CK34-DAG: [[MF3:%.+]] = getelementptr inbounds {{.+}}[[MF]], i{{.+}} 0, i{{.+}} 3 // CK34-DAG: [[BPC3:%.+]] = bitcast i8** [[BP3]] to %class.S** @@ -139,8 +136,6 @@ // CK34-DAG: store %class.S* [[S_ADDR]], %class.S** [[BPC3]], // CK34-DAG: store %class.C* [[C_ADDR:%.+]], %class.C** [[PC3]], - // CK34-64-DAG: store i64 16, i64* [[S3]], - // CK34-32-DAG: store i64 8, i64* [[S3]], // CK34-DAG: store i8* bitcast (void (i8*, i8*, i8*, i64, i64, i8*)* [[C_DEFAULT_MAPPER:@.+]] to i8*), i8** [[MF3]], // CK34-64-DAG: [[C_ADDR]] = getelementptr inbounds %class.S, %class.S* [[S_ADDR]], i32 0, i32 2 @@ -151,17 +146,15 @@ // CK34 : call void - // CK34-DAG: call i32 @__tgt_target_mapper(%struct.ident_t* @{{.+}}, i64 -1, i8* @{{.+}}, i32 4, i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], i64* [[GEPS:%.+]], {{.+}}getelementptr {{.+}}[4 x i{{.+}}]* [[MTYPE_FROM]]{{.+}}, i8** null, i8** [[GEPMF:%.+]]) + // CK34-DAG: call i32 @__tgt_target_mapper(%struct.ident_t* @{{.+}}, i64 -1, i8* @{{.+}}, i32 4, i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], {{.+}}getelementptr {{.+}}[4 x i{{.+}}]* [[SIZE_FROM]]{{.+}}, {{.+}}getelementptr {{.+}}[4 x i{{.+}}]* [[MTYPE_FROM]]{{.+}}, i8** null, i8** [[GEPMF:%.+]]) // CK34-DAG: [[GEPBP]] = getelementptr inbounds {{.+}}[[BP:%[^,]+]] // CK34-DAG: [[GEPP]] = getelementptr inbounds {{.+}}[[P:%[^,]+]] - // CK34-DAG: [[GEPS]] = getelementptr inbounds {{.+}}[[S:%[^,]+]] // CK34-DAG: [[GEPMF]] = bitcast [4 x i8*]* [[MF:%.+]] to i8** // pass TARGET_PARAM {&s, &s, ((void*)(&s+1)-(void*)&s)} // CK34-DAG: [[BP0:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 0 // CK34-DAG: [[P0:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 0 - // CK34-DAG: [[S0:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 0 // CK34-DAG: [[MF0:%.+]] = getelementptr inbounds {{.+}}[[MF]], i{{.+}} 0, i{{.+}} 0 // CK34-DAG: [[BPC0:%.+]] = bitcast i8** [[BP0]] to %class.S** @@ -169,7 +162,7 @@ // CK34-DAG: store %class.S* [[S_ADDR]], %class.S** [[BPC0]], // CK34-DAG: store %class.S* [[S_ADDR]], %class.S** [[PC0]], - // CK34-DAG: store i64 [[S_SIZE:%.+]], i64* [[S0]], + // CK34-DAG: store i64 [[S_SIZE:%.+]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* [[SIZE_FROM]], i32 0, i32 
0), // CK34-DAG: store i8* null, i8** [[MF0]], // CK34-DAG: [[S_SIZE]] = sdiv exact i64 [[SZ:%.+]], ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64) @@ -184,7 +177,6 @@ // CK34-DAG: [[BP1:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 1 // CK34-DAG: [[P1:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 1 - // CK34-DAG: [[S1:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 1 // CK34-DAG: [[MF1:%.+]] = getelementptr inbounds {{.+}}[[MF]], i{{.+}} 0, i{{.+}} 1 // CK34-DAG: [[BPC1:%.+]] = bitcast i8** [[BP1]] to %class.S** @@ -192,7 +184,7 @@ // CK34-DAG: store %class.S* [[S_ADDR]], %class.S** [[BPC1]], // CK34-DAG: store %class.S* [[S_ADDR]], %class.S** [[PC1]], - // CK34-DAG: store i64 [[A_SIZE:%.+]], i64* [[S1]], + // CK34-DAG: store i64 [[A_SIZE:%.+]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* [[SIZE_FROM]], i32 0, i32 1), // CK34-DAG: store i8* null, i8** [[MF1]], // CK34-DAG: [[A_SIZE]] = sdiv exact i64 [[SZ:%.+]], ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64) @@ -208,7 +200,6 @@ // CK34-DAG: [[BP2:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 2 // CK34-DAG: [[P2:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 2 - // CK34-DAG: [[S2:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 2 // CK34-DAG: [[MF2:%.+]] = getelementptr inbounds {{.+}}[[MF]], i{{.+}} 0, i{{.+}} 2 // CK34-DAG: [[BPC2:%.+]] = bitcast i8** [[BP2]] to %class.S** @@ -216,7 +207,7 @@ // CK34-DAG: store %class.S* [[S_ADDR]], %class.S** [[BPC2]], // CK34-DAG: store %class.C* [[C_END:%.+]], %class.C** [[PC2]], - // CK34-DAG: store i64 [[B_SIZE:%.+]], i64* [[S2]], + // CK34-DAG: store i64 [[B_SIZE:%.+]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* [[SIZE_FROM]], i32 0, i32 2), // CK34-DAG: store i8* null, i8** [[MF2]], // CK34-DAG: [[C_END]] = getelementptr %class.C, %class.C* [[C_ADDR]], i{{.+}} 1 @@ -235,7 +226,6 @@ // CK34-DAG: [[BP3:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 3 // CK34-DAG: [[P3:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 3 - // CK34-DAG: [[S3:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 3 // CK34-DAG: [[MF3:%.+]] = getelementptr inbounds {{.+}}[[MF]], i{{.+}} 0, i{{.+}} 3 // CK34-DAG: [[BPC3:%.+]] = bitcast i8** [[BP3]] to %class.S** @@ -243,8 +233,6 @@ // CK34-DAG: store %class.S* [[S_ADDR]], %class.S** [[BPC3]], // CK34-DAG: store %class.C* [[C_ADDR:%.+]], %class.C** [[PC3]], - // CK34-64-DAG: store i64 16, i64* [[S3]], - // CK34-32-DAG: store i64 8, i64* [[S3]], // CK34-DAG: store i8* bitcast (void (i8*, i8*, i8*, i64, i64, i8*)* [[C_DEFAULT_MAPPER]] to i8*), i8** [[MF3]], // CK34-64-DAG: [[C_ADDR]] = getelementptr inbounds %class.S, %class.S* [[S_ADDR]], i32 0, i32 2 diff --git a/clang/test/OpenMP/target_map_codegen_35.cpp b/clang/test/OpenMP/target_map_codegen_35.cpp --- a/clang/test/OpenMP/target_map_codegen_35.cpp +++ b/clang/test/OpenMP/target_map_codegen_35.cpp @@ -27,10 +27,12 @@ void foo(); }; +// CK35-DAG: [[SIZE_TO:@.+]] = private {{.*}}global [4 x i64] [i64 0, i64 0, i64 0, i64 8] // TARGET_PARAM = 0x20 // MEMBER_OF_1 | TO = 0x1000000000001 // MEMBER_OF_1 | PTR_AND_OBJ | TO = 0x1000000000011 // CK35-DAG: [[MTYPE_TO:@.+]] = {{.+}}constant [4 x i64] [i64 [[#0x20]], i64 [[#0x1000000000001]], i64 [[#0x1000000000001]], i64 [[#0x1000000000011]]] +// CK35-DAG: [[SIZE_FROM:@.+]] = private {{.*}}global [2 x i64] [i64 0, i64 8] // TARGET_PARAM = 0x20 // MEMBER_OF_1 | PTR_AND_OBJ | FROM = 0x1000000000012 // CK35-DAG: 
[[MTYPE_FROM:@.+]] = {{.+}}constant [2 x i64] [i64 [[#0x20]], i64 [[#0x1000000000012]]] @@ -39,23 +41,21 @@ double b; S s(b); - // CK35-DAG: call i32 @__tgt_target_mapper(%struct.ident_t* @{{.+}}, i64 -1, i8* @{{.+}}, i32 4, i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], i64* [[GEPS:%.+]], {{.+}}getelementptr {{.+}}[4 x i{{.+}}]* [[MTYPE_TO]]{{.+}}, i8** null, i8** null) + // CK35-DAG: call i32 @__tgt_target_mapper(%struct.ident_t* @{{.+}}, i64 -1, i8* @{{.+}}, i32 4, i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], {{.+}}getelementptr {{.+}}[4 x i{{.+}}]* [[SIZE_TO]]{{.+}}, {{.+}}getelementptr {{.+}}[4 x i{{.+}}]* [[MTYPE_TO]]{{.+}}, i8** null, i8** null) // CK35-DAG: [[GEPBP]] = getelementptr inbounds {{.+}}[[BP:%[^,]+]] // CK35-DAG: [[GEPP]] = getelementptr inbounds {{.+}}[[P:%[^,]+]] - // CK35-DAG: [[GEPS]] = getelementptr inbounds {{.+}}[[S:%[^,]+]] // pass TARGET_PARAM {&s, &s, ((void*)(&s+1)-(void*)&s)} // CK35-DAG: [[BP0:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 0 // CK35-DAG: [[P0:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 0 - // CK35-DAG: [[S0:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 0 // CK35-DAG: [[BPC0:%.+]] = bitcast i8** [[BP0]] to %class.S** // CK35-DAG: [[PC0:%.+]] = bitcast i8** [[P0]] to %class.S** // CK35-DAG: store %class.S* [[S_ADDR:%.+]], %class.S** [[BPC0]], // CK35-DAG: store %class.S* [[S_ADDR]], %class.S** [[PC0]], - // CK35-DAG: store i64 [[S_SIZE:%.+]], i64* [[S0]], + // CK35-DAG: store i64 [[S_SIZE:%.+]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* [[SIZE_TO]], i32 0, i32 0), // CK35-DAG: [[S_SIZE]] = sdiv exact i64 [[SZ:%.+]], ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64) // CK35-DAG: [[SZ]] = sub i64 [[S_1_INTPTR:%.+]], [[S_INTPTR:%.+]] @@ -69,14 +69,13 @@ // CK35-DAG: [[BP1:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 1 // CK35-DAG: [[P1:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 1 - // CK35-DAG: [[S1:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 1 // CK35-DAG: [[BPC1:%.+]] = bitcast i8** [[BP1]] to %class.S** // CK35-DAG: [[PC1:%.+]] = bitcast i8** [[P1]] to %class.S** // CK35-DAG: store %class.S* [[S_ADDR]], %class.S** [[BPC1]], // CK35-DAG: store %class.S* [[S_ADDR]], %class.S** [[PC1]], - // CK35-DAG: store i64 [[A_SIZE:%.+]], i64* [[S1]], + // CK35-DAG: store i64 [[A_SIZE:%.+]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* [[SIZE_TO]], i32 0, i32 1), // CK35-DAG: [[A_SIZE]] = sdiv exact i64 [[SZ:%.+]], ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64) // CK35-DAG: [[SZ]] = sub i64 [[B_BEGIN_INTPTR:%.+]], [[S_INTPTR:%.+]] @@ -90,14 +89,13 @@ // CK35-DAG: [[BP2:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 2 // CK35-DAG: [[P2:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 2 - // CK35-DAG: [[S2:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 2 // CK35-DAG: [[BPC2:%.+]] = bitcast i8** [[BP2]] to %class.S** // CK35-DAG: [[PC2:%.+]] = bitcast i8** [[P2]] to double*** // CK35-DAG: store %class.S* [[S_ADDR]], %class.S** [[BPC2]], // CK35-DAG: store double** [[B_END:%.+]], double*** [[PC2]], - // CK35-DAG: store i64 [[REM_SIZE:%.+]], i64* [[S2]], + // CK35-DAG: store i64 [[REM_SIZE:%.+]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* [[SIZE_TO]], i32 0, i32 2), // CK35-DAG: [[B_END]] = getelementptr double*, double** [[B_ADDR]], i{{.+}} 1 @@ -115,14 +113,12 @@ // CK35-DAG: [[BP3:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 3 // CK35-DAG: [[P3:%.+]] = 
getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 3 - // CK35-DAG: [[S3:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 3 // CK35-DAG: [[BPC3:%.+]] = bitcast i8** [[BP3]] to %class.S** // CK35-DAG: [[PC3:%.+]] = bitcast i8** [[P3]] to double** // CK35-DAG: store %class.S* [[S_ADDR]], %class.S** [[BPC3]], // CK35-DAG: store double* [[B_ADDR:%.+]], double** [[PC3]], - // CK35-DAG: store i64 8, i64* [[S3]], // CK35-DAG: [[B_ADDR]] = load double*, double** [[B_REF:%.+]], // CK35-DAG: [[B_REF]] = getelementptr inbounds %class.S, %class.S* [[S_ADDR]], i32 0, i32 1 @@ -132,23 +128,21 @@ // CK35 : call void - // CK35-DAG: call i32 @__tgt_target_mapper(%struct.ident_t* @{{.+}}, i64 -1, i8* @{{.+}}, i32 2, i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], i64* [[GEPS:%.+]], {{.+}}getelementptr {{.+}}[2 x i{{.+}}]* [[MTYPE_FROM]]{{.+}}, i8** null, i8** null) + // CK35-DAG: call i32 @__tgt_target_mapper(%struct.ident_t* @{{.+}}, i64 -1, i8* @{{.+}}, i32 2, i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], {{.+}}getelementptr {{.+}}[2 x i{{.+}}]* [[SIZE_FROM]]{{.+}}, {{.+}}getelementptr {{.+}}[2 x i{{.+}}]* [[MTYPE_FROM]]{{.+}}, i8** null, i8** null) // CK35-DAG: [[GEPBP]] = getelementptr inbounds {{.+}}[[BP:%[^,]+]] // CK35-DAG: [[GEPP]] = getelementptr inbounds {{.+}}[[P:%[^,]+]] - // CK35-DAG: [[GEPS]] = getelementptr inbounds {{.+}}[[S:%[^,]+]] // pass TARGET_PARAM {&s, &s.b, ((void*)(&s.b+1)-(void*)&s.b)} // CK35-DAG: [[BP0:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 0 // CK35-DAG: [[P0:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 0 - // CK35-DAG: [[S0:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 0 // CK35-DAG: [[BPC0:%.+]] = bitcast i8** [[BP0]] to %class.S** // CK35-DAG: [[PC0:%.+]] = bitcast i8** [[P0]] to double*** // CK35-DAG: store %class.S* [[S_ADDR]], %class.S** [[BPC0]], // CK35-DAG: store double** [[SB_ADDR:%.+]], double*** [[PC0]], - // CK35-DAG: store i64 [[B_SIZE:%.+]], i64* [[S0]], + // CK35-DAG: store i64 [[B_SIZE:%.+]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* [[SIZE_FROM]], i32 0, i32 0), // CK35-DAG: [[B_SIZE]] = sdiv exact i64 [[SZ:%.+]], ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64) // CK35-DAG: [[SZ]] = sub i64 [[SB_1_INTPTR:%.+]], [[SB_INTPTR:%.+]] @@ -163,14 +157,12 @@ // CK35-DAG: [[BP1:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 1 // CK35-DAG: [[P1:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 1 - // CK35-DAG: [[S1:%.+]] = getelementptr inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 1 // CK35-DAG: [[BPC1:%.+]] = bitcast i8** [[BP1]] to %class.S** // CK35-DAG: [[PC1:%.+]] = bitcast i8** [[P1]] to double** // CK35-DAG: store %class.S* [[S_ADDR]], %class.S** [[BPC1]], // CK35-DAG: store double* [[B_ADDR:%.+]], double** [[PC1]], - // CK35-DAG: store i64 8, i64* [[S1]], // CK35-DAG: [[B_ADDR]] = load double*, double** [[SB_ADDR]], diff --git a/clang/test/OpenMP/target_map_codegen_hold.cpp b/clang/test/OpenMP/target_map_codegen_hold.cpp --- a/clang/test/OpenMP/target_map_codegen_hold.cpp +++ b/clang/test/OpenMP/target_map_codegen_hold.cpp @@ -1,4 +1,4 @@ -// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --check-globals --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" --global-value-regex ".offload_maptypes.*" ".offload_sizes.*" --global-hex-value-regex ".offload_maptypes.*" +// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --check-globals --replace-value-regex 
"__omp_offloading_[0-9a-z]+_[0-9a-z]+" --prefix-filecheck-ir-name _ --global-value-regex ".offload_maptypes.*" ".offload_sizes.*" --global-hex-value-regex ".offload_maptypes.*" // expected-no-diagnostics #ifndef HEADER #define HEADER @@ -83,25 +83,33 @@ // MEMBER_OF_5 = 0x5000000000000 //. +// CHECK-USE-PPC64LE: @.offload_sizes = private unnamed_addr global [7 x i64] [i64 0, i64 4, i64 4, i64 4, i64 0, i64 4, i64 4] // CHECK-USE-PPC64LE: @.offload_maptypes = private unnamed_addr constant [7 x i64] [i64 [[#0x2020]], i64 [[#0x1000000002003]], i64 [[#0x1000000002003]], i64 [[#0x2023]], i64 [[#0x2020]], i64 [[#0x5000000002003]], i64 [[#0x5000000002003]]] -// CHECK-USE-PPC64LE: @.offload_sizes = private unnamed_addr constant [1 x i64] [i64 4] -// CHECK-USE-PPC64LE: @.offload_maptypes.1 = private unnamed_addr constant [1 x i64] [i64 [[#0x2427]]] -// CHECK-USE-PPC64LE: @.offload_maptypes.2 = private unnamed_addr constant [3 x i64] [i64 [[#0x2020]], i64 [[#0x1000000002003]], i64 [[#0x1000000002003]]] +// CHECK-USE-PPC64LE: @.offload_sizes.1 = private unnamed_addr constant [1 x i64] [i64 4] +// CHECK-USE-PPC64LE: @.offload_maptypes.2 = private unnamed_addr constant [1 x i64] [i64 [[#0x2427]]] +// CHECK-USE-PPC64LE: @.offload_sizes.3 = private unnamed_addr global [3 x i64] [i64 0, i64 4, i64 4] +// CHECK-USE-PPC64LE: @.offload_maptypes.4 = private unnamed_addr constant [3 x i64] [i64 [[#0x2020]], i64 [[#0x1000000002003]], i64 [[#0x1000000002003]]] //. +// CHECK-USE-I386: @.offload_sizes = private unnamed_addr global [7 x i64] [i64 0, i64 4, i64 4, i64 4, i64 0, i64 4, i64 4] // CHECK-USE-I386: @.offload_maptypes = private unnamed_addr constant [7 x i64] [i64 [[#0x2020]], i64 [[#0x1000000002003]], i64 [[#0x1000000002003]], i64 [[#0x2023]], i64 [[#0x2020]], i64 [[#0x5000000002003]], i64 [[#0x5000000002003]]] -// CHECK-USE-I386: @.offload_sizes = private unnamed_addr constant [1 x i64] [i64 4] -// CHECK-USE-I386: @.offload_maptypes.1 = private unnamed_addr constant [1 x i64] [i64 [[#0x2427]]] -// CHECK-USE-I386: @.offload_maptypes.2 = private unnamed_addr constant [3 x i64] [i64 [[#0x2020]], i64 [[#0x1000000002003]], i64 [[#0x1000000002003]]] +// CHECK-USE-I386: @.offload_sizes.1 = private unnamed_addr constant [1 x i64] [i64 4] +// CHECK-USE-I386: @.offload_maptypes.2 = private unnamed_addr constant [1 x i64] [i64 [[#0x2427]]] +// CHECK-USE-I386: @.offload_sizes.3 = private unnamed_addr global [3 x i64] [i64 0, i64 4, i64 4] +// CHECK-USE-I386: @.offload_maptypes.4 = private unnamed_addr constant [3 x i64] [i64 [[#0x2020]], i64 [[#0x1000000002003]], i64 [[#0x1000000002003]]] //. 
+// CHECK-NOUSE-PPC64LE: @.offload_sizes = private unnamed_addr global [7 x i64] [i64 0, i64 4, i64 4, i64 4, i64 0, i64 4, i64 4] // CHECK-NOUSE-PPC64LE: @.offload_maptypes = private unnamed_addr constant [7 x i64] [i64 [[#0x2000]], i64 [[#0x1000000002003]], i64 [[#0x1000000002003]], i64 [[#0x2003]], i64 [[#0x2000]], i64 [[#0x5000000002003]], i64 [[#0x5000000002003]]] -// CHECK-NOUSE-PPC64LE: @.offload_sizes = private unnamed_addr constant [1 x i64] [i64 4] -// CHECK-NOUSE-PPC64LE: @.offload_maptypes.1 = private unnamed_addr constant [1 x i64] [i64 [[#0x2407]]] -// CHECK-NOUSE-PPC64LE: @.offload_maptypes.2 = private unnamed_addr constant [3 x i64] [i64 [[#0x2000]], i64 [[#0x1000000002003]], i64 [[#0x1000000002003]]] +// CHECK-NOUSE-PPC64LE: @.offload_sizes.1 = private unnamed_addr constant [1 x i64] [i64 4] +// CHECK-NOUSE-PPC64LE: @.offload_maptypes.2 = private unnamed_addr constant [1 x i64] [i64 [[#0x2407]]] +// CHECK-NOUSE-PPC64LE: @.offload_sizes.3 = private unnamed_addr global [3 x i64] [i64 0, i64 4, i64 4] +// CHECK-NOUSE-PPC64LE: @.offload_maptypes.4 = private unnamed_addr constant [3 x i64] [i64 [[#0x2000]], i64 [[#0x1000000002003]], i64 [[#0x1000000002003]]] //. +// CHECK-NOUSE-I386: @.offload_sizes = private unnamed_addr global [7 x i64] [i64 0, i64 4, i64 4, i64 4, i64 0, i64 4, i64 4] // CHECK-NOUSE-I386: @.offload_maptypes = private unnamed_addr constant [7 x i64] [i64 [[#0x2000]], i64 [[#0x1000000002003]], i64 [[#0x1000000002003]], i64 [[#0x2003]], i64 [[#0x2000]], i64 [[#0x5000000002003]], i64 [[#0x5000000002003]]] -// CHECK-NOUSE-I386: @.offload_sizes = private unnamed_addr constant [1 x i64] [i64 4] -// CHECK-NOUSE-I386: @.offload_maptypes.1 = private unnamed_addr constant [1 x i64] [i64 [[#0x2407]]] -// CHECK-NOUSE-I386: @.offload_maptypes.2 = private unnamed_addr constant [3 x i64] [i64 [[#0x2000]], i64 [[#0x1000000002003]], i64 [[#0x1000000002003]]] +// CHECK-NOUSE-I386: @.offload_sizes.1 = private unnamed_addr constant [1 x i64] [i64 4] +// CHECK-NOUSE-I386: @.offload_maptypes.2 = private unnamed_addr constant [1 x i64] [i64 [[#0x2407]]] +// CHECK-NOUSE-I386: @.offload_sizes.3 = private unnamed_addr global [3 x i64] [i64 0, i64 4, i64 4] +// CHECK-NOUSE-I386: @.offload_maptypes.4 = private unnamed_addr constant [3 x i64] [i64 [[#0x2000]], i64 [[#0x1000000002003]], i64 [[#0x1000000002003]]] //. 
struct ST { int i; @@ -118,7 +126,6 @@ // CHECK-USE-PPC64LE-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [7 x i8*], align 8 // CHECK-USE-PPC64LE-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [7 x i8*], align 8 // CHECK-USE-PPC64LE-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [7 x i8*], align 8 -// CHECK-USE-PPC64LE-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [7 x i64], align 8 // CHECK-USE-PPC64LE-NEXT: [[DOTOFFLOAD_BASEPTRS3:%.*]] = alloca [1 x i8*], align 8 // CHECK-USE-PPC64LE-NEXT: [[DOTOFFLOAD_PTRS4:%.*]] = alloca [1 x i8*], align 8 // CHECK-USE-PPC64LE-NEXT: [[DOTOFFLOAD_MAPPERS5:%.*]] = alloca [1 x i8*], align 8 @@ -149,95 +156,82 @@ // CHECK-USE-PPC64LE-NEXT: [[TMP17:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK-USE-PPC64LE-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32** // CHECK-USE-PPC64LE-NEXT: store i32* [[I]], i32** [[TMP18]], align 8 -// CHECK-USE-PPC64LE-NEXT: [[TMP19:%.*]] = getelementptr inbounds [7 x i64], [7 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK-USE-PPC64LE-NEXT: store i64 [[TMP7]], i64* [[TMP19]], align 8 -// CHECK-USE-PPC64LE-NEXT: [[TMP20:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK-USE-PPC64LE-NEXT: store i8* null, i8** [[TMP20]], align 8 -// CHECK-USE-PPC64LE-NEXT: [[TMP21:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK-USE-PPC64LE-NEXT: [[TMP22:%.*]] = bitcast i8** [[TMP21]] to %struct.ST** -// CHECK-USE-PPC64LE-NEXT: store %struct.ST* [[ST1]], %struct.ST** [[TMP22]], align 8 -// CHECK-USE-PPC64LE-NEXT: [[TMP23:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK-USE-PPC64LE-NEXT: [[TMP24:%.*]] = bitcast i8** [[TMP23]] to i32** -// CHECK-USE-PPC64LE-NEXT: store i32* [[I]], i32** [[TMP24]], align 8 -// CHECK-USE-PPC64LE-NEXT: [[TMP25:%.*]] = getelementptr inbounds [7 x i64], [7 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK-USE-PPC64LE-NEXT: store i64 4, i64* [[TMP25]], align 8 -// CHECK-USE-PPC64LE-NEXT: [[TMP26:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK-USE-PPC64LE-NEXT: store i8* null, i8** [[TMP26]], align 8 -// CHECK-USE-PPC64LE-NEXT: [[TMP27:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 -// CHECK-USE-PPC64LE-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to %struct.ST** -// CHECK-USE-PPC64LE-NEXT: store %struct.ST* [[ST1]], %struct.ST** [[TMP28]], align 8 -// CHECK-USE-PPC64LE-NEXT: [[TMP29:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK-USE-PPC64LE-NEXT: [[TMP30:%.*]] = bitcast i8** [[TMP29]] to i32** -// CHECK-USE-PPC64LE-NEXT: store i32* [[J]], i32** [[TMP30]], align 8 -// CHECK-USE-PPC64LE-NEXT: [[TMP31:%.*]] = getelementptr inbounds [7 x i64], [7 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK-USE-PPC64LE-NEXT: store i64 4, i64* [[TMP31]], align 8 -// CHECK-USE-PPC64LE-NEXT: [[TMP32:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK-USE-PPC64LE-NEXT: store i8* null, i8** [[TMP32]], align 8 -// CHECK-USE-PPC64LE-NEXT: [[TMP33:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK-USE-PPC64LE-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i32** -// CHECK-USE-PPC64LE-NEXT: store i32* [[A]], i32** [[TMP34]], align 8 -// CHECK-USE-PPC64LE-NEXT: [[TMP35:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* 
[[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK-USE-PPC64LE-NEXT: [[TMP36:%.*]] = bitcast i8** [[TMP35]] to i32** -// CHECK-USE-PPC64LE-NEXT: store i32* [[A]], i32** [[TMP36]], align 8 -// CHECK-USE-PPC64LE-NEXT: [[TMP37:%.*]] = getelementptr inbounds [7 x i64], [7 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK-USE-PPC64LE-NEXT: store i64 4, i64* [[TMP37]], align 8 -// CHECK-USE-PPC64LE-NEXT: [[TMP38:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 -// CHECK-USE-PPC64LE-NEXT: store i8* null, i8** [[TMP38]], align 8 -// CHECK-USE-PPC64LE-NEXT: [[TMP39:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK-USE-PPC64LE-NEXT: [[TMP40:%.*]] = bitcast i8** [[TMP39]] to %struct.ST** -// CHECK-USE-PPC64LE-NEXT: store %struct.ST* [[ST2]], %struct.ST** [[TMP40]], align 8 -// CHECK-USE-PPC64LE-NEXT: [[TMP41:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK-USE-PPC64LE-NEXT: [[TMP42:%.*]] = bitcast i8** [[TMP41]] to i32** -// CHECK-USE-PPC64LE-NEXT: store i32* [[I1]], i32** [[TMP42]], align 8 -// CHECK-USE-PPC64LE-NEXT: [[TMP43:%.*]] = getelementptr inbounds [7 x i64], [7 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK-USE-PPC64LE-NEXT: store i64 [[TMP14]], i64* [[TMP43]], align 8 -// CHECK-USE-PPC64LE-NEXT: [[TMP44:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 +// CHECK-USE-PPC64LE-NEXT: store i64 [[TMP7]], i64* getelementptr inbounds ([7 x i64], [7 x i64]* @.offload_sizes, i32 0, i32 0), align 8 +// CHECK-USE-PPC64LE-NEXT: [[TMP19:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK-USE-PPC64LE-NEXT: store i8* null, i8** [[TMP19]], align 8 +// CHECK-USE-PPC64LE-NEXT: [[TMP20:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK-USE-PPC64LE-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to %struct.ST** +// CHECK-USE-PPC64LE-NEXT: store %struct.ST* [[ST1]], %struct.ST** [[TMP21]], align 8 +// CHECK-USE-PPC64LE-NEXT: [[TMP22:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK-USE-PPC64LE-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i32** +// CHECK-USE-PPC64LE-NEXT: store i32* [[I]], i32** [[TMP23]], align 8 +// CHECK-USE-PPC64LE-NEXT: [[TMP24:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK-USE-PPC64LE-NEXT: store i8* null, i8** [[TMP24]], align 8 +// CHECK-USE-PPC64LE-NEXT: [[TMP25:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK-USE-PPC64LE-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to %struct.ST** +// CHECK-USE-PPC64LE-NEXT: store %struct.ST* [[ST1]], %struct.ST** [[TMP26]], align 8 +// CHECK-USE-PPC64LE-NEXT: [[TMP27:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 +// CHECK-USE-PPC64LE-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i32** +// CHECK-USE-PPC64LE-NEXT: store i32* [[J]], i32** [[TMP28]], align 8 +// CHECK-USE-PPC64LE-NEXT: [[TMP29:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK-USE-PPC64LE-NEXT: store i8* null, i8** [[TMP29]], align 8 +// CHECK-USE-PPC64LE-NEXT: [[TMP30:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK-USE-PPC64LE-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i32** +// CHECK-USE-PPC64LE-NEXT: store i32* 
[[A]], i32** [[TMP31]], align 8 +// CHECK-USE-PPC64LE-NEXT: [[TMP32:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK-USE-PPC64LE-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i32** +// CHECK-USE-PPC64LE-NEXT: store i32* [[A]], i32** [[TMP33]], align 8 +// CHECK-USE-PPC64LE-NEXT: [[TMP34:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 +// CHECK-USE-PPC64LE-NEXT: store i8* null, i8** [[TMP34]], align 8 +// CHECK-USE-PPC64LE-NEXT: [[TMP35:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK-USE-PPC64LE-NEXT: [[TMP36:%.*]] = bitcast i8** [[TMP35]] to %struct.ST** +// CHECK-USE-PPC64LE-NEXT: store %struct.ST* [[ST2]], %struct.ST** [[TMP36]], align 8 +// CHECK-USE-PPC64LE-NEXT: [[TMP37:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK-USE-PPC64LE-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i32** +// CHECK-USE-PPC64LE-NEXT: store i32* [[I1]], i32** [[TMP38]], align 8 +// CHECK-USE-PPC64LE-NEXT: store i64 [[TMP14]], i64* getelementptr inbounds ([7 x i64], [7 x i64]* @.offload_sizes, i32 0, i32 4), align 8 +// CHECK-USE-PPC64LE-NEXT: [[TMP39:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 +// CHECK-USE-PPC64LE-NEXT: store i8* null, i8** [[TMP39]], align 8 +// CHECK-USE-PPC64LE-NEXT: [[TMP40:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 5 +// CHECK-USE-PPC64LE-NEXT: [[TMP41:%.*]] = bitcast i8** [[TMP40]] to %struct.ST** +// CHECK-USE-PPC64LE-NEXT: store %struct.ST* [[ST2]], %struct.ST** [[TMP41]], align 8 +// CHECK-USE-PPC64LE-NEXT: [[TMP42:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 5 +// CHECK-USE-PPC64LE-NEXT: [[TMP43:%.*]] = bitcast i8** [[TMP42]] to i32** +// CHECK-USE-PPC64LE-NEXT: store i32* [[I1]], i32** [[TMP43]], align 8 +// CHECK-USE-PPC64LE-NEXT: [[TMP44:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 5 // CHECK-USE-PPC64LE-NEXT: store i8* null, i8** [[TMP44]], align 8 -// CHECK-USE-PPC64LE-NEXT: [[TMP45:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 5 +// CHECK-USE-PPC64LE-NEXT: [[TMP45:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 6 // CHECK-USE-PPC64LE-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to %struct.ST** // CHECK-USE-PPC64LE-NEXT: store %struct.ST* [[ST2]], %struct.ST** [[TMP46]], align 8 -// CHECK-USE-PPC64LE-NEXT: [[TMP47:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 5 +// CHECK-USE-PPC64LE-NEXT: [[TMP47:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 6 // CHECK-USE-PPC64LE-NEXT: [[TMP48:%.*]] = bitcast i8** [[TMP47]] to i32** -// CHECK-USE-PPC64LE-NEXT: store i32* [[I1]], i32** [[TMP48]], align 8 -// CHECK-USE-PPC64LE-NEXT: [[TMP49:%.*]] = getelementptr inbounds [7 x i64], [7 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5 -// CHECK-USE-PPC64LE-NEXT: store i64 4, i64* [[TMP49]], align 8 -// CHECK-USE-PPC64LE-NEXT: [[TMP50:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 5 -// CHECK-USE-PPC64LE-NEXT: store i8* null, i8** [[TMP50]], align 8 -// CHECK-USE-PPC64LE-NEXT: [[TMP51:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 6 -// CHECK-USE-PPC64LE-NEXT: [[TMP52:%.*]] = bitcast i8** [[TMP51]] to 
%struct.ST** -// CHECK-USE-PPC64LE-NEXT: store %struct.ST* [[ST2]], %struct.ST** [[TMP52]], align 8 -// CHECK-USE-PPC64LE-NEXT: [[TMP53:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 6 -// CHECK-USE-PPC64LE-NEXT: [[TMP54:%.*]] = bitcast i8** [[TMP53]] to i32** -// CHECK-USE-PPC64LE-NEXT: store i32* [[J2]], i32** [[TMP54]], align 8 -// CHECK-USE-PPC64LE-NEXT: [[TMP55:%.*]] = getelementptr inbounds [7 x i64], [7 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6 -// CHECK-USE-PPC64LE-NEXT: store i64 4, i64* [[TMP55]], align 8 -// CHECK-USE-PPC64LE-NEXT: [[TMP56:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 6 -// CHECK-USE-PPC64LE-NEXT: store i8* null, i8** [[TMP56]], align 8 -// CHECK-USE-PPC64LE-NEXT: [[TMP57:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK-USE-PPC64LE-NEXT: [[TMP58:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK-USE-PPC64LE-NEXT: [[TMP59:%.*]] = getelementptr inbounds [7 x i64], [7 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK-USE-PPC64LE-NEXT: [[TMP60:%.*]] = call i32 @__tgt_target_mapper(%struct.ident_t* @[[GLOB1:[0-9]+]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z20explicit_maps_singlei_l654.region_id, i32 7, i8** [[TMP57]], i8** [[TMP58]], i64* [[TMP59]], i64* getelementptr inbounds ([7 x i64], [7 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null) -// CHECK-USE-PPC64LE-NEXT: [[TMP61:%.*]] = icmp ne i32 [[TMP60]], 0 -// CHECK-USE-PPC64LE-NEXT: br i1 [[TMP61]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK-USE-PPC64LE-NEXT: store i32* [[J2]], i32** [[TMP48]], align 8 +// CHECK-USE-PPC64LE-NEXT: [[TMP49:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 6 +// CHECK-USE-PPC64LE-NEXT: store i8* null, i8** [[TMP49]], align 8 +// CHECK-USE-PPC64LE-NEXT: [[TMP50:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK-USE-PPC64LE-NEXT: [[TMP51:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK-USE-PPC64LE-NEXT: [[TMP52:%.*]] = call i32 @__tgt_target_mapper(%struct.ident_t* @[[GLOB1:[0-9]+]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z20explicit_maps_singlei_l606.region_id, i32 7, i8** [[TMP50]], i8** [[TMP51]], i64* getelementptr inbounds ([7 x i64], [7 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([7 x i64], [7 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null) +// CHECK-USE-PPC64LE-NEXT: [[TMP53:%.*]] = icmp ne i32 [[TMP52]], 0 +// CHECK-USE-PPC64LE-NEXT: br i1 [[TMP53]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK-USE-PPC64LE: omp_offload.failed: -// CHECK-USE-PPC64LE-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z20explicit_maps_singlei_l654(%struct.ST* [[ST1]], i32* [[A]], %struct.ST* [[ST2]]) #[[ATTR2:[0-9]+]] +// CHECK-USE-PPC64LE-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z20explicit_maps_singlei_l606(%struct.ST* [[ST1]], i32* [[A]], %struct.ST* [[ST2]]) #[[ATTR2:[0-9]+]] // CHECK-USE-PPC64LE-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK-USE-PPC64LE: omp_offload.cont: -// CHECK-USE-PPC64LE-NEXT: [[TMP62:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0 -// CHECK-USE-PPC64LE-NEXT: [[TMP63:%.*]] = bitcast i8** [[TMP62]] to i32** -// CHECK-USE-PPC64LE-NEXT: store 
i32* [[A]], i32** [[TMP63]], align 8 -// CHECK-USE-PPC64LE-NEXT: [[TMP64:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS4]], i32 0, i32 0 -// CHECK-USE-PPC64LE-NEXT: [[TMP65:%.*]] = bitcast i8** [[TMP64]] to i32** -// CHECK-USE-PPC64LE-NEXT: store i32* [[A]], i32** [[TMP65]], align 8 -// CHECK-USE-PPC64LE-NEXT: [[TMP66:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS5]], i64 0, i64 0 -// CHECK-USE-PPC64LE-NEXT: store i8* null, i8** [[TMP66]], align 8 -// CHECK-USE-PPC64LE-NEXT: [[TMP67:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0 -// CHECK-USE-PPC64LE-NEXT: [[TMP68:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS4]], i32 0, i32 0 -// CHECK-USE-PPC64LE-NEXT: [[TMP69:%.*]] = call i32 @__tgt_target_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z20explicit_maps_singlei_l668.region_id, i32 1, i8** [[TMP67]], i8** [[TMP68]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.1, i32 0, i32 0), i8** null, i8** null) -// CHECK-USE-PPC64LE-NEXT: [[TMP70:%.*]] = icmp ne i32 [[TMP69]], 0 -// CHECK-USE-PPC64LE-NEXT: br i1 [[TMP70]], label [[OMP_OFFLOAD_FAILED6:%.*]], label [[OMP_OFFLOAD_CONT7:%.*]] +// CHECK-USE-PPC64LE-NEXT: [[TMP54:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0 +// CHECK-USE-PPC64LE-NEXT: [[TMP55:%.*]] = bitcast i8** [[TMP54]] to i32** +// CHECK-USE-PPC64LE-NEXT: store i32* [[A]], i32** [[TMP55]], align 8 +// CHECK-USE-PPC64LE-NEXT: [[TMP56:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS4]], i32 0, i32 0 +// CHECK-USE-PPC64LE-NEXT: [[TMP57:%.*]] = bitcast i8** [[TMP56]] to i32** +// CHECK-USE-PPC64LE-NEXT: store i32* [[A]], i32** [[TMP57]], align 8 +// CHECK-USE-PPC64LE-NEXT: [[TMP58:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS5]], i64 0, i64 0 +// CHECK-USE-PPC64LE-NEXT: store i8* null, i8** [[TMP58]], align 8 +// CHECK-USE-PPC64LE-NEXT: [[TMP59:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0 +// CHECK-USE-PPC64LE-NEXT: [[TMP60:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS4]], i32 0, i32 0 +// CHECK-USE-PPC64LE-NEXT: [[TMP61:%.*]] = call i32 @__tgt_target_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z20explicit_maps_singlei_l620.region_id, i32 1, i8** [[TMP59]], i8** [[TMP60]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.1, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.2, i32 0, i32 0), i8** null, i8** null) +// CHECK-USE-PPC64LE-NEXT: [[TMP62:%.*]] = icmp ne i32 [[TMP61]], 0 +// CHECK-USE-PPC64LE-NEXT: br i1 [[TMP62]], label [[OMP_OFFLOAD_FAILED6:%.*]], label [[OMP_OFFLOAD_CONT7:%.*]] // CHECK-USE-PPC64LE: omp_offload.failed6: -// CHECK-USE-PPC64LE-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z20explicit_maps_singlei_l668(i32* [[A]]) #[[ATTR2]] +// CHECK-USE-PPC64LE-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z20explicit_maps_singlei_l620(i32* [[A]]) #[[ATTR2]] // CHECK-USE-PPC64LE-NEXT: br label [[OMP_OFFLOAD_CONT7]] // CHECK-USE-PPC64LE: omp_offload.cont7: // CHECK-USE-PPC64LE-NEXT: ret void @@ -251,7 +245,6 @@ // CHECK-USE-I386-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [7 x i8*], align 4 // CHECK-USE-I386-NEXT: 
[[DOTOFFLOAD_PTRS:%.*]] = alloca [7 x i8*], align 4 // CHECK-USE-I386-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [7 x i8*], align 4 -// CHECK-USE-I386-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [7 x i64], align 4 // CHECK-USE-I386-NEXT: [[DOTOFFLOAD_BASEPTRS3:%.*]] = alloca [1 x i8*], align 4 // CHECK-USE-I386-NEXT: [[DOTOFFLOAD_PTRS4:%.*]] = alloca [1 x i8*], align 4 // CHECK-USE-I386-NEXT: [[DOTOFFLOAD_MAPPERS5:%.*]] = alloca [1 x i8*], align 4 @@ -282,95 +275,82 @@ // CHECK-USE-I386-NEXT: [[TMP17:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK-USE-I386-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32** // CHECK-USE-I386-NEXT: store i32* [[I]], i32** [[TMP18]], align 4 -// CHECK-USE-I386-NEXT: [[TMP19:%.*]] = getelementptr inbounds [7 x i64], [7 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK-USE-I386-NEXT: store i64 [[TMP7]], i64* [[TMP19]], align 4 -// CHECK-USE-I386-NEXT: [[TMP20:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK-USE-I386-NEXT: store i8* null, i8** [[TMP20]], align 4 -// CHECK-USE-I386-NEXT: [[TMP21:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK-USE-I386-NEXT: [[TMP22:%.*]] = bitcast i8** [[TMP21]] to %struct.ST** -// CHECK-USE-I386-NEXT: store %struct.ST* [[ST1]], %struct.ST** [[TMP22]], align 4 -// CHECK-USE-I386-NEXT: [[TMP23:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK-USE-I386-NEXT: [[TMP24:%.*]] = bitcast i8** [[TMP23]] to i32** -// CHECK-USE-I386-NEXT: store i32* [[I]], i32** [[TMP24]], align 4 -// CHECK-USE-I386-NEXT: [[TMP25:%.*]] = getelementptr inbounds [7 x i64], [7 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK-USE-I386-NEXT: store i64 4, i64* [[TMP25]], align 4 -// CHECK-USE-I386-NEXT: [[TMP26:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK-USE-I386-NEXT: store i8* null, i8** [[TMP26]], align 4 -// CHECK-USE-I386-NEXT: [[TMP27:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 -// CHECK-USE-I386-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to %struct.ST** -// CHECK-USE-I386-NEXT: store %struct.ST* [[ST1]], %struct.ST** [[TMP28]], align 4 -// CHECK-USE-I386-NEXT: [[TMP29:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK-USE-I386-NEXT: [[TMP30:%.*]] = bitcast i8** [[TMP29]] to i32** -// CHECK-USE-I386-NEXT: store i32* [[J]], i32** [[TMP30]], align 4 -// CHECK-USE-I386-NEXT: [[TMP31:%.*]] = getelementptr inbounds [7 x i64], [7 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK-USE-I386-NEXT: store i64 4, i64* [[TMP31]], align 4 -// CHECK-USE-I386-NEXT: [[TMP32:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK-USE-I386-NEXT: store i8* null, i8** [[TMP32]], align 4 -// CHECK-USE-I386-NEXT: [[TMP33:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK-USE-I386-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i32** -// CHECK-USE-I386-NEXT: store i32* [[A]], i32** [[TMP34]], align 4 -// CHECK-USE-I386-NEXT: [[TMP35:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK-USE-I386-NEXT: [[TMP36:%.*]] = bitcast i8** [[TMP35]] to i32** -// CHECK-USE-I386-NEXT: store i32* [[A]], i32** [[TMP36]], align 4 -// CHECK-USE-I386-NEXT: [[TMP37:%.*]] = getelementptr inbounds [7 x i64], [7 x 
i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK-USE-I386-NEXT: store i64 4, i64* [[TMP37]], align 4 -// CHECK-USE-I386-NEXT: [[TMP38:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 -// CHECK-USE-I386-NEXT: store i8* null, i8** [[TMP38]], align 4 -// CHECK-USE-I386-NEXT: [[TMP39:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK-USE-I386-NEXT: [[TMP40:%.*]] = bitcast i8** [[TMP39]] to %struct.ST** -// CHECK-USE-I386-NEXT: store %struct.ST* [[ST2]], %struct.ST** [[TMP40]], align 4 -// CHECK-USE-I386-NEXT: [[TMP41:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK-USE-I386-NEXT: [[TMP42:%.*]] = bitcast i8** [[TMP41]] to i32** -// CHECK-USE-I386-NEXT: store i32* [[I1]], i32** [[TMP42]], align 4 -// CHECK-USE-I386-NEXT: [[TMP43:%.*]] = getelementptr inbounds [7 x i64], [7 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK-USE-I386-NEXT: store i64 [[TMP14]], i64* [[TMP43]], align 4 -// CHECK-USE-I386-NEXT: [[TMP44:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 +// CHECK-USE-I386-NEXT: store i64 [[TMP7]], i64* getelementptr inbounds ([7 x i64], [7 x i64]* @.offload_sizes, i32 0, i32 0), align 4 +// CHECK-USE-I386-NEXT: [[TMP19:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK-USE-I386-NEXT: store i8* null, i8** [[TMP19]], align 4 +// CHECK-USE-I386-NEXT: [[TMP20:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK-USE-I386-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to %struct.ST** +// CHECK-USE-I386-NEXT: store %struct.ST* [[ST1]], %struct.ST** [[TMP21]], align 4 +// CHECK-USE-I386-NEXT: [[TMP22:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK-USE-I386-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i32** +// CHECK-USE-I386-NEXT: store i32* [[I]], i32** [[TMP23]], align 4 +// CHECK-USE-I386-NEXT: [[TMP24:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK-USE-I386-NEXT: store i8* null, i8** [[TMP24]], align 4 +// CHECK-USE-I386-NEXT: [[TMP25:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK-USE-I386-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to %struct.ST** +// CHECK-USE-I386-NEXT: store %struct.ST* [[ST1]], %struct.ST** [[TMP26]], align 4 +// CHECK-USE-I386-NEXT: [[TMP27:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 +// CHECK-USE-I386-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i32** +// CHECK-USE-I386-NEXT: store i32* [[J]], i32** [[TMP28]], align 4 +// CHECK-USE-I386-NEXT: [[TMP29:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK-USE-I386-NEXT: store i8* null, i8** [[TMP29]], align 4 +// CHECK-USE-I386-NEXT: [[TMP30:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK-USE-I386-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i32** +// CHECK-USE-I386-NEXT: store i32* [[A]], i32** [[TMP31]], align 4 +// CHECK-USE-I386-NEXT: [[TMP32:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK-USE-I386-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i32** +// CHECK-USE-I386-NEXT: store i32* [[A]], i32** [[TMP33]], align 4 +// CHECK-USE-I386-NEXT: [[TMP34:%.*]] = getelementptr inbounds [7 x i8*], 
[7 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 +// CHECK-USE-I386-NEXT: store i8* null, i8** [[TMP34]], align 4 +// CHECK-USE-I386-NEXT: [[TMP35:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK-USE-I386-NEXT: [[TMP36:%.*]] = bitcast i8** [[TMP35]] to %struct.ST** +// CHECK-USE-I386-NEXT: store %struct.ST* [[ST2]], %struct.ST** [[TMP36]], align 4 +// CHECK-USE-I386-NEXT: [[TMP37:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK-USE-I386-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i32** +// CHECK-USE-I386-NEXT: store i32* [[I1]], i32** [[TMP38]], align 4 +// CHECK-USE-I386-NEXT: store i64 [[TMP14]], i64* getelementptr inbounds ([7 x i64], [7 x i64]* @.offload_sizes, i32 0, i32 4), align 4 +// CHECK-USE-I386-NEXT: [[TMP39:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 +// CHECK-USE-I386-NEXT: store i8* null, i8** [[TMP39]], align 4 +// CHECK-USE-I386-NEXT: [[TMP40:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 5 +// CHECK-USE-I386-NEXT: [[TMP41:%.*]] = bitcast i8** [[TMP40]] to %struct.ST** +// CHECK-USE-I386-NEXT: store %struct.ST* [[ST2]], %struct.ST** [[TMP41]], align 4 +// CHECK-USE-I386-NEXT: [[TMP42:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 5 +// CHECK-USE-I386-NEXT: [[TMP43:%.*]] = bitcast i8** [[TMP42]] to i32** +// CHECK-USE-I386-NEXT: store i32* [[I1]], i32** [[TMP43]], align 4 +// CHECK-USE-I386-NEXT: [[TMP44:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 5 // CHECK-USE-I386-NEXT: store i8* null, i8** [[TMP44]], align 4 -// CHECK-USE-I386-NEXT: [[TMP45:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 5 +// CHECK-USE-I386-NEXT: [[TMP45:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 6 // CHECK-USE-I386-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to %struct.ST** // CHECK-USE-I386-NEXT: store %struct.ST* [[ST2]], %struct.ST** [[TMP46]], align 4 -// CHECK-USE-I386-NEXT: [[TMP47:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 5 +// CHECK-USE-I386-NEXT: [[TMP47:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 6 // CHECK-USE-I386-NEXT: [[TMP48:%.*]] = bitcast i8** [[TMP47]] to i32** -// CHECK-USE-I386-NEXT: store i32* [[I1]], i32** [[TMP48]], align 4 -// CHECK-USE-I386-NEXT: [[TMP49:%.*]] = getelementptr inbounds [7 x i64], [7 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5 -// CHECK-USE-I386-NEXT: store i64 4, i64* [[TMP49]], align 4 -// CHECK-USE-I386-NEXT: [[TMP50:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 5 -// CHECK-USE-I386-NEXT: store i8* null, i8** [[TMP50]], align 4 -// CHECK-USE-I386-NEXT: [[TMP51:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 6 -// CHECK-USE-I386-NEXT: [[TMP52:%.*]] = bitcast i8** [[TMP51]] to %struct.ST** -// CHECK-USE-I386-NEXT: store %struct.ST* [[ST2]], %struct.ST** [[TMP52]], align 4 -// CHECK-USE-I386-NEXT: [[TMP53:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 6 -// CHECK-USE-I386-NEXT: [[TMP54:%.*]] = bitcast i8** [[TMP53]] to i32** -// CHECK-USE-I386-NEXT: store i32* [[J2]], i32** [[TMP54]], align 4 -// CHECK-USE-I386-NEXT: [[TMP55:%.*]] = getelementptr inbounds [7 x i64], [7 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, 
i32 6 -// CHECK-USE-I386-NEXT: store i64 4, i64* [[TMP55]], align 4 -// CHECK-USE-I386-NEXT: [[TMP56:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 6 -// CHECK-USE-I386-NEXT: store i8* null, i8** [[TMP56]], align 4 -// CHECK-USE-I386-NEXT: [[TMP57:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK-USE-I386-NEXT: [[TMP58:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK-USE-I386-NEXT: [[TMP59:%.*]] = getelementptr inbounds [7 x i64], [7 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK-USE-I386-NEXT: [[TMP60:%.*]] = call i32 @__tgt_target_mapper(%struct.ident_t* @[[GLOB1:[0-9]+]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z20explicit_maps_singlei_l654.region_id, i32 7, i8** [[TMP57]], i8** [[TMP58]], i64* [[TMP59]], i64* getelementptr inbounds ([7 x i64], [7 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null) -// CHECK-USE-I386-NEXT: [[TMP61:%.*]] = icmp ne i32 [[TMP60]], 0 -// CHECK-USE-I386-NEXT: br i1 [[TMP61]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK-USE-I386-NEXT: store i32* [[J2]], i32** [[TMP48]], align 4 +// CHECK-USE-I386-NEXT: [[TMP49:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 6 +// CHECK-USE-I386-NEXT: store i8* null, i8** [[TMP49]], align 4 +// CHECK-USE-I386-NEXT: [[TMP50:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK-USE-I386-NEXT: [[TMP51:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK-USE-I386-NEXT: [[TMP52:%.*]] = call i32 @__tgt_target_mapper(%struct.ident_t* @[[GLOB1:[0-9]+]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z20explicit_maps_singlei_l606.region_id, i32 7, i8** [[TMP50]], i8** [[TMP51]], i64* getelementptr inbounds ([7 x i64], [7 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([7 x i64], [7 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null) +// CHECK-USE-I386-NEXT: [[TMP53:%.*]] = icmp ne i32 [[TMP52]], 0 +// CHECK-USE-I386-NEXT: br i1 [[TMP53]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK-USE-I386: omp_offload.failed: -// CHECK-USE-I386-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z20explicit_maps_singlei_l654(%struct.ST* [[ST1]], i32* [[A]], %struct.ST* [[ST2]]) #[[ATTR2:[0-9]+]] +// CHECK-USE-I386-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z20explicit_maps_singlei_l606(%struct.ST* [[ST1]], i32* [[A]], %struct.ST* [[ST2]]) #[[ATTR2:[0-9]+]] // CHECK-USE-I386-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK-USE-I386: omp_offload.cont: -// CHECK-USE-I386-NEXT: [[TMP62:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0 -// CHECK-USE-I386-NEXT: [[TMP63:%.*]] = bitcast i8** [[TMP62]] to i32** -// CHECK-USE-I386-NEXT: store i32* [[A]], i32** [[TMP63]], align 4 -// CHECK-USE-I386-NEXT: [[TMP64:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS4]], i32 0, i32 0 -// CHECK-USE-I386-NEXT: [[TMP65:%.*]] = bitcast i8** [[TMP64]] to i32** -// CHECK-USE-I386-NEXT: store i32* [[A]], i32** [[TMP65]], align 4 -// CHECK-USE-I386-NEXT: [[TMP66:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS5]], i32 0, i32 0 -// CHECK-USE-I386-NEXT: store i8* null, i8** [[TMP66]], align 4 -// CHECK-USE-I386-NEXT: [[TMP67:%.*]] = getelementptr inbounds [1 x i8*], [1 
x i8*]* [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0 -// CHECK-USE-I386-NEXT: [[TMP68:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS4]], i32 0, i32 0 -// CHECK-USE-I386-NEXT: [[TMP69:%.*]] = call i32 @__tgt_target_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z20explicit_maps_singlei_l668.region_id, i32 1, i8** [[TMP67]], i8** [[TMP68]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.1, i32 0, i32 0), i8** null, i8** null) -// CHECK-USE-I386-NEXT: [[TMP70:%.*]] = icmp ne i32 [[TMP69]], 0 -// CHECK-USE-I386-NEXT: br i1 [[TMP70]], label [[OMP_OFFLOAD_FAILED6:%.*]], label [[OMP_OFFLOAD_CONT7:%.*]] +// CHECK-USE-I386-NEXT: [[TMP54:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0 +// CHECK-USE-I386-NEXT: [[TMP55:%.*]] = bitcast i8** [[TMP54]] to i32** +// CHECK-USE-I386-NEXT: store i32* [[A]], i32** [[TMP55]], align 4 +// CHECK-USE-I386-NEXT: [[TMP56:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS4]], i32 0, i32 0 +// CHECK-USE-I386-NEXT: [[TMP57:%.*]] = bitcast i8** [[TMP56]] to i32** +// CHECK-USE-I386-NEXT: store i32* [[A]], i32** [[TMP57]], align 4 +// CHECK-USE-I386-NEXT: [[TMP58:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS5]], i32 0, i32 0 +// CHECK-USE-I386-NEXT: store i8* null, i8** [[TMP58]], align 4 +// CHECK-USE-I386-NEXT: [[TMP59:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0 +// CHECK-USE-I386-NEXT: [[TMP60:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS4]], i32 0, i32 0 +// CHECK-USE-I386-NEXT: [[TMP61:%.*]] = call i32 @__tgt_target_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z20explicit_maps_singlei_l620.region_id, i32 1, i8** [[TMP59]], i8** [[TMP60]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.1, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.2, i32 0, i32 0), i8** null, i8** null) +// CHECK-USE-I386-NEXT: [[TMP62:%.*]] = icmp ne i32 [[TMP61]], 0 +// CHECK-USE-I386-NEXT: br i1 [[TMP62]], label [[OMP_OFFLOAD_FAILED6:%.*]], label [[OMP_OFFLOAD_CONT7:%.*]] // CHECK-USE-I386: omp_offload.failed6: -// CHECK-USE-I386-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z20explicit_maps_singlei_l668(i32* [[A]]) #[[ATTR2]] +// CHECK-USE-I386-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z20explicit_maps_singlei_l620(i32* [[A]]) #[[ATTR2]] // CHECK-USE-I386-NEXT: br label [[OMP_OFFLOAD_CONT7]] // CHECK-USE-I386: omp_offload.cont7: // CHECK-USE-I386-NEXT: ret void @@ -384,7 +364,6 @@ // CHECK-NOUSE-PPC64LE-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [7 x i8*], align 8 // CHECK-NOUSE-PPC64LE-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [7 x i8*], align 8 // CHECK-NOUSE-PPC64LE-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [7 x i8*], align 8 -// CHECK-NOUSE-PPC64LE-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [7 x i64], align 8 // CHECK-NOUSE-PPC64LE-NEXT: [[DOTOFFLOAD_BASEPTRS3:%.*]] = alloca [1 x i8*], align 8 // CHECK-NOUSE-PPC64LE-NEXT: [[DOTOFFLOAD_PTRS4:%.*]] = alloca [1 x i8*], align 8 // CHECK-NOUSE-PPC64LE-NEXT: [[DOTOFFLOAD_MAPPERS5:%.*]] = alloca [1 x i8*], align 8 @@ -415,95 +394,82 @@ // CHECK-NOUSE-PPC64LE-NEXT: [[TMP17:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK-NOUSE-PPC64LE-NEXT: 
[[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32** // CHECK-NOUSE-PPC64LE-NEXT: store i32* [[I]], i32** [[TMP18]], align 8 -// CHECK-NOUSE-PPC64LE-NEXT: [[TMP19:%.*]] = getelementptr inbounds [7 x i64], [7 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK-NOUSE-PPC64LE-NEXT: store i64 [[TMP7]], i64* [[TMP19]], align 8 -// CHECK-NOUSE-PPC64LE-NEXT: [[TMP20:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK-NOUSE-PPC64LE-NEXT: store i8* null, i8** [[TMP20]], align 8 -// CHECK-NOUSE-PPC64LE-NEXT: [[TMP21:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK-NOUSE-PPC64LE-NEXT: [[TMP22:%.*]] = bitcast i8** [[TMP21]] to %struct.ST** -// CHECK-NOUSE-PPC64LE-NEXT: store %struct.ST* [[ST1]], %struct.ST** [[TMP22]], align 8 -// CHECK-NOUSE-PPC64LE-NEXT: [[TMP23:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK-NOUSE-PPC64LE-NEXT: [[TMP24:%.*]] = bitcast i8** [[TMP23]] to i32** -// CHECK-NOUSE-PPC64LE-NEXT: store i32* [[I]], i32** [[TMP24]], align 8 -// CHECK-NOUSE-PPC64LE-NEXT: [[TMP25:%.*]] = getelementptr inbounds [7 x i64], [7 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK-NOUSE-PPC64LE-NEXT: store i64 4, i64* [[TMP25]], align 8 -// CHECK-NOUSE-PPC64LE-NEXT: [[TMP26:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK-NOUSE-PPC64LE-NEXT: store i8* null, i8** [[TMP26]], align 8 -// CHECK-NOUSE-PPC64LE-NEXT: [[TMP27:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 -// CHECK-NOUSE-PPC64LE-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to %struct.ST** -// CHECK-NOUSE-PPC64LE-NEXT: store %struct.ST* [[ST1]], %struct.ST** [[TMP28]], align 8 -// CHECK-NOUSE-PPC64LE-NEXT: [[TMP29:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK-NOUSE-PPC64LE-NEXT: [[TMP30:%.*]] = bitcast i8** [[TMP29]] to i32** -// CHECK-NOUSE-PPC64LE-NEXT: store i32* [[J]], i32** [[TMP30]], align 8 -// CHECK-NOUSE-PPC64LE-NEXT: [[TMP31:%.*]] = getelementptr inbounds [7 x i64], [7 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK-NOUSE-PPC64LE-NEXT: store i64 4, i64* [[TMP31]], align 8 -// CHECK-NOUSE-PPC64LE-NEXT: [[TMP32:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK-NOUSE-PPC64LE-NEXT: store i8* null, i8** [[TMP32]], align 8 -// CHECK-NOUSE-PPC64LE-NEXT: [[TMP33:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK-NOUSE-PPC64LE-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i32** -// CHECK-NOUSE-PPC64LE-NEXT: store i32* [[A]], i32** [[TMP34]], align 8 -// CHECK-NOUSE-PPC64LE-NEXT: [[TMP35:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK-NOUSE-PPC64LE-NEXT: [[TMP36:%.*]] = bitcast i8** [[TMP35]] to i32** -// CHECK-NOUSE-PPC64LE-NEXT: store i32* [[A]], i32** [[TMP36]], align 8 -// CHECK-NOUSE-PPC64LE-NEXT: [[TMP37:%.*]] = getelementptr inbounds [7 x i64], [7 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK-NOUSE-PPC64LE-NEXT: store i64 4, i64* [[TMP37]], align 8 -// CHECK-NOUSE-PPC64LE-NEXT: [[TMP38:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 -// CHECK-NOUSE-PPC64LE-NEXT: store i8* null, i8** [[TMP38]], align 8 -// CHECK-NOUSE-PPC64LE-NEXT: [[TMP39:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// 
CHECK-NOUSE-PPC64LE-NEXT: [[TMP40:%.*]] = bitcast i8** [[TMP39]] to %struct.ST** -// CHECK-NOUSE-PPC64LE-NEXT: store %struct.ST* [[ST2]], %struct.ST** [[TMP40]], align 8 -// CHECK-NOUSE-PPC64LE-NEXT: [[TMP41:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK-NOUSE-PPC64LE-NEXT: [[TMP42:%.*]] = bitcast i8** [[TMP41]] to i32** -// CHECK-NOUSE-PPC64LE-NEXT: store i32* [[I1]], i32** [[TMP42]], align 8 -// CHECK-NOUSE-PPC64LE-NEXT: [[TMP43:%.*]] = getelementptr inbounds [7 x i64], [7 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK-NOUSE-PPC64LE-NEXT: store i64 [[TMP14]], i64* [[TMP43]], align 8 -// CHECK-NOUSE-PPC64LE-NEXT: [[TMP44:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 +// CHECK-NOUSE-PPC64LE-NEXT: store i64 [[TMP7]], i64* getelementptr inbounds ([7 x i64], [7 x i64]* @.offload_sizes, i32 0, i32 0), align 8 +// CHECK-NOUSE-PPC64LE-NEXT: [[TMP19:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK-NOUSE-PPC64LE-NEXT: store i8* null, i8** [[TMP19]], align 8 +// CHECK-NOUSE-PPC64LE-NEXT: [[TMP20:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK-NOUSE-PPC64LE-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to %struct.ST** +// CHECK-NOUSE-PPC64LE-NEXT: store %struct.ST* [[ST1]], %struct.ST** [[TMP21]], align 8 +// CHECK-NOUSE-PPC64LE-NEXT: [[TMP22:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK-NOUSE-PPC64LE-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i32** +// CHECK-NOUSE-PPC64LE-NEXT: store i32* [[I]], i32** [[TMP23]], align 8 +// CHECK-NOUSE-PPC64LE-NEXT: [[TMP24:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK-NOUSE-PPC64LE-NEXT: store i8* null, i8** [[TMP24]], align 8 +// CHECK-NOUSE-PPC64LE-NEXT: [[TMP25:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK-NOUSE-PPC64LE-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to %struct.ST** +// CHECK-NOUSE-PPC64LE-NEXT: store %struct.ST* [[ST1]], %struct.ST** [[TMP26]], align 8 +// CHECK-NOUSE-PPC64LE-NEXT: [[TMP27:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 +// CHECK-NOUSE-PPC64LE-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i32** +// CHECK-NOUSE-PPC64LE-NEXT: store i32* [[J]], i32** [[TMP28]], align 8 +// CHECK-NOUSE-PPC64LE-NEXT: [[TMP29:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK-NOUSE-PPC64LE-NEXT: store i8* null, i8** [[TMP29]], align 8 +// CHECK-NOUSE-PPC64LE-NEXT: [[TMP30:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK-NOUSE-PPC64LE-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i32** +// CHECK-NOUSE-PPC64LE-NEXT: store i32* [[A]], i32** [[TMP31]], align 8 +// CHECK-NOUSE-PPC64LE-NEXT: [[TMP32:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK-NOUSE-PPC64LE-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i32** +// CHECK-NOUSE-PPC64LE-NEXT: store i32* [[A]], i32** [[TMP33]], align 8 +// CHECK-NOUSE-PPC64LE-NEXT: [[TMP34:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 +// CHECK-NOUSE-PPC64LE-NEXT: store i8* null, i8** [[TMP34]], align 8 +// CHECK-NOUSE-PPC64LE-NEXT: [[TMP35:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 
+// CHECK-NOUSE-PPC64LE-NEXT: [[TMP36:%.*]] = bitcast i8** [[TMP35]] to %struct.ST** +// CHECK-NOUSE-PPC64LE-NEXT: store %struct.ST* [[ST2]], %struct.ST** [[TMP36]], align 8 +// CHECK-NOUSE-PPC64LE-NEXT: [[TMP37:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK-NOUSE-PPC64LE-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i32** +// CHECK-NOUSE-PPC64LE-NEXT: store i32* [[I1]], i32** [[TMP38]], align 8 +// CHECK-NOUSE-PPC64LE-NEXT: store i64 [[TMP14]], i64* getelementptr inbounds ([7 x i64], [7 x i64]* @.offload_sizes, i32 0, i32 4), align 8 +// CHECK-NOUSE-PPC64LE-NEXT: [[TMP39:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 +// CHECK-NOUSE-PPC64LE-NEXT: store i8* null, i8** [[TMP39]], align 8 +// CHECK-NOUSE-PPC64LE-NEXT: [[TMP40:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 5 +// CHECK-NOUSE-PPC64LE-NEXT: [[TMP41:%.*]] = bitcast i8** [[TMP40]] to %struct.ST** +// CHECK-NOUSE-PPC64LE-NEXT: store %struct.ST* [[ST2]], %struct.ST** [[TMP41]], align 8 +// CHECK-NOUSE-PPC64LE-NEXT: [[TMP42:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 5 +// CHECK-NOUSE-PPC64LE-NEXT: [[TMP43:%.*]] = bitcast i8** [[TMP42]] to i32** +// CHECK-NOUSE-PPC64LE-NEXT: store i32* [[I1]], i32** [[TMP43]], align 8 +// CHECK-NOUSE-PPC64LE-NEXT: [[TMP44:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 5 // CHECK-NOUSE-PPC64LE-NEXT: store i8* null, i8** [[TMP44]], align 8 -// CHECK-NOUSE-PPC64LE-NEXT: [[TMP45:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 5 +// CHECK-NOUSE-PPC64LE-NEXT: [[TMP45:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 6 // CHECK-NOUSE-PPC64LE-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to %struct.ST** // CHECK-NOUSE-PPC64LE-NEXT: store %struct.ST* [[ST2]], %struct.ST** [[TMP46]], align 8 -// CHECK-NOUSE-PPC64LE-NEXT: [[TMP47:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 5 +// CHECK-NOUSE-PPC64LE-NEXT: [[TMP47:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 6 // CHECK-NOUSE-PPC64LE-NEXT: [[TMP48:%.*]] = bitcast i8** [[TMP47]] to i32** -// CHECK-NOUSE-PPC64LE-NEXT: store i32* [[I1]], i32** [[TMP48]], align 8 -// CHECK-NOUSE-PPC64LE-NEXT: [[TMP49:%.*]] = getelementptr inbounds [7 x i64], [7 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5 -// CHECK-NOUSE-PPC64LE-NEXT: store i64 4, i64* [[TMP49]], align 8 -// CHECK-NOUSE-PPC64LE-NEXT: [[TMP50:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 5 -// CHECK-NOUSE-PPC64LE-NEXT: store i8* null, i8** [[TMP50]], align 8 -// CHECK-NOUSE-PPC64LE-NEXT: [[TMP51:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 6 -// CHECK-NOUSE-PPC64LE-NEXT: [[TMP52:%.*]] = bitcast i8** [[TMP51]] to %struct.ST** -// CHECK-NOUSE-PPC64LE-NEXT: store %struct.ST* [[ST2]], %struct.ST** [[TMP52]], align 8 -// CHECK-NOUSE-PPC64LE-NEXT: [[TMP53:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 6 -// CHECK-NOUSE-PPC64LE-NEXT: [[TMP54:%.*]] = bitcast i8** [[TMP53]] to i32** -// CHECK-NOUSE-PPC64LE-NEXT: store i32* [[J2]], i32** [[TMP54]], align 8 -// CHECK-NOUSE-PPC64LE-NEXT: [[TMP55:%.*]] = getelementptr inbounds [7 x i64], [7 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6 -// CHECK-NOUSE-PPC64LE-NEXT: store i64 4, i64* [[TMP55]], 
align 8 -// CHECK-NOUSE-PPC64LE-NEXT: [[TMP56:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 6 -// CHECK-NOUSE-PPC64LE-NEXT: store i8* null, i8** [[TMP56]], align 8 -// CHECK-NOUSE-PPC64LE-NEXT: [[TMP57:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK-NOUSE-PPC64LE-NEXT: [[TMP58:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK-NOUSE-PPC64LE-NEXT: [[TMP59:%.*]] = getelementptr inbounds [7 x i64], [7 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK-NOUSE-PPC64LE-NEXT: [[TMP60:%.*]] = call i32 @__tgt_target_mapper(%struct.ident_t* @[[GLOB1:[0-9]+]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z20explicit_maps_singlei_l654.region_id, i32 7, i8** [[TMP57]], i8** [[TMP58]], i64* [[TMP59]], i64* getelementptr inbounds ([7 x i64], [7 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null) -// CHECK-NOUSE-PPC64LE-NEXT: [[TMP61:%.*]] = icmp ne i32 [[TMP60]], 0 -// CHECK-NOUSE-PPC64LE-NEXT: br i1 [[TMP61]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK-NOUSE-PPC64LE-NEXT: store i32* [[J2]], i32** [[TMP48]], align 8 +// CHECK-NOUSE-PPC64LE-NEXT: [[TMP49:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 6 +// CHECK-NOUSE-PPC64LE-NEXT: store i8* null, i8** [[TMP49]], align 8 +// CHECK-NOUSE-PPC64LE-NEXT: [[TMP50:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK-NOUSE-PPC64LE-NEXT: [[TMP51:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK-NOUSE-PPC64LE-NEXT: [[TMP52:%.*]] = call i32 @__tgt_target_mapper(%struct.ident_t* @[[GLOB1:[0-9]+]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z20explicit_maps_singlei_l606.region_id, i32 7, i8** [[TMP50]], i8** [[TMP51]], i64* getelementptr inbounds ([7 x i64], [7 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([7 x i64], [7 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null) +// CHECK-NOUSE-PPC64LE-NEXT: [[TMP53:%.*]] = icmp ne i32 [[TMP52]], 0 +// CHECK-NOUSE-PPC64LE-NEXT: br i1 [[TMP53]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK-NOUSE-PPC64LE: omp_offload.failed: -// CHECK-NOUSE-PPC64LE-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z20explicit_maps_singlei_l654() #[[ATTR2:[0-9]+]] +// CHECK-NOUSE-PPC64LE-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z20explicit_maps_singlei_l606() #[[ATTR2:[0-9]+]] // CHECK-NOUSE-PPC64LE-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK-NOUSE-PPC64LE: omp_offload.cont: -// CHECK-NOUSE-PPC64LE-NEXT: [[TMP62:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0 -// CHECK-NOUSE-PPC64LE-NEXT: [[TMP63:%.*]] = bitcast i8** [[TMP62]] to i32** -// CHECK-NOUSE-PPC64LE-NEXT: store i32* [[A]], i32** [[TMP63]], align 8 -// CHECK-NOUSE-PPC64LE-NEXT: [[TMP64:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS4]], i32 0, i32 0 -// CHECK-NOUSE-PPC64LE-NEXT: [[TMP65:%.*]] = bitcast i8** [[TMP64]] to i32** -// CHECK-NOUSE-PPC64LE-NEXT: store i32* [[A]], i32** [[TMP65]], align 8 -// CHECK-NOUSE-PPC64LE-NEXT: [[TMP66:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS5]], i64 0, i64 0 -// CHECK-NOUSE-PPC64LE-NEXT: store i8* null, i8** [[TMP66]], align 8 -// CHECK-NOUSE-PPC64LE-NEXT: [[TMP67:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* 
[[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0 -// CHECK-NOUSE-PPC64LE-NEXT: [[TMP68:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS4]], i32 0, i32 0 -// CHECK-NOUSE-PPC64LE-NEXT: [[TMP69:%.*]] = call i32 @__tgt_target_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z20explicit_maps_singlei_l668.region_id, i32 1, i8** [[TMP67]], i8** [[TMP68]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.1, i32 0, i32 0), i8** null, i8** null) -// CHECK-NOUSE-PPC64LE-NEXT: [[TMP70:%.*]] = icmp ne i32 [[TMP69]], 0 -// CHECK-NOUSE-PPC64LE-NEXT: br i1 [[TMP70]], label [[OMP_OFFLOAD_FAILED6:%.*]], label [[OMP_OFFLOAD_CONT7:%.*]] +// CHECK-NOUSE-PPC64LE-NEXT: [[TMP54:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0 +// CHECK-NOUSE-PPC64LE-NEXT: [[TMP55:%.*]] = bitcast i8** [[TMP54]] to i32** +// CHECK-NOUSE-PPC64LE-NEXT: store i32* [[A]], i32** [[TMP55]], align 8 +// CHECK-NOUSE-PPC64LE-NEXT: [[TMP56:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS4]], i32 0, i32 0 +// CHECK-NOUSE-PPC64LE-NEXT: [[TMP57:%.*]] = bitcast i8** [[TMP56]] to i32** +// CHECK-NOUSE-PPC64LE-NEXT: store i32* [[A]], i32** [[TMP57]], align 8 +// CHECK-NOUSE-PPC64LE-NEXT: [[TMP58:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS5]], i64 0, i64 0 +// CHECK-NOUSE-PPC64LE-NEXT: store i8* null, i8** [[TMP58]], align 8 +// CHECK-NOUSE-PPC64LE-NEXT: [[TMP59:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0 +// CHECK-NOUSE-PPC64LE-NEXT: [[TMP60:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS4]], i32 0, i32 0 +// CHECK-NOUSE-PPC64LE-NEXT: [[TMP61:%.*]] = call i32 @__tgt_target_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z20explicit_maps_singlei_l620.region_id, i32 1, i8** [[TMP59]], i8** [[TMP60]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.1, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.2, i32 0, i32 0), i8** null, i8** null) +// CHECK-NOUSE-PPC64LE-NEXT: [[TMP62:%.*]] = icmp ne i32 [[TMP61]], 0 +// CHECK-NOUSE-PPC64LE-NEXT: br i1 [[TMP62]], label [[OMP_OFFLOAD_FAILED6:%.*]], label [[OMP_OFFLOAD_CONT7:%.*]] // CHECK-NOUSE-PPC64LE: omp_offload.failed6: -// CHECK-NOUSE-PPC64LE-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z20explicit_maps_singlei_l668() #[[ATTR2]] +// CHECK-NOUSE-PPC64LE-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z20explicit_maps_singlei_l620() #[[ATTR2]] // CHECK-NOUSE-PPC64LE-NEXT: br label [[OMP_OFFLOAD_CONT7]] // CHECK-NOUSE-PPC64LE: omp_offload.cont7: // CHECK-NOUSE-PPC64LE-NEXT: ret void @@ -517,7 +483,6 @@ // CHECK-NOUSE-I386-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [7 x i8*], align 4 // CHECK-NOUSE-I386-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [7 x i8*], align 4 // CHECK-NOUSE-I386-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [7 x i8*], align 4 -// CHECK-NOUSE-I386-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [7 x i64], align 4 // CHECK-NOUSE-I386-NEXT: [[DOTOFFLOAD_BASEPTRS3:%.*]] = alloca [1 x i8*], align 4 // CHECK-NOUSE-I386-NEXT: [[DOTOFFLOAD_PTRS4:%.*]] = alloca [1 x i8*], align 4 // CHECK-NOUSE-I386-NEXT: [[DOTOFFLOAD_MAPPERS5:%.*]] = alloca [1 x i8*], align 4 @@ -548,95 +513,82 @@ // CHECK-NOUSE-I386-NEXT: [[TMP17:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* 
[[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK-NOUSE-I386-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32** // CHECK-NOUSE-I386-NEXT: store i32* [[I]], i32** [[TMP18]], align 4 -// CHECK-NOUSE-I386-NEXT: [[TMP19:%.*]] = getelementptr inbounds [7 x i64], [7 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK-NOUSE-I386-NEXT: store i64 [[TMP7]], i64* [[TMP19]], align 4 -// CHECK-NOUSE-I386-NEXT: [[TMP20:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK-NOUSE-I386-NEXT: store i8* null, i8** [[TMP20]], align 4 -// CHECK-NOUSE-I386-NEXT: [[TMP21:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK-NOUSE-I386-NEXT: [[TMP22:%.*]] = bitcast i8** [[TMP21]] to %struct.ST** -// CHECK-NOUSE-I386-NEXT: store %struct.ST* [[ST1]], %struct.ST** [[TMP22]], align 4 -// CHECK-NOUSE-I386-NEXT: [[TMP23:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK-NOUSE-I386-NEXT: [[TMP24:%.*]] = bitcast i8** [[TMP23]] to i32** -// CHECK-NOUSE-I386-NEXT: store i32* [[I]], i32** [[TMP24]], align 4 -// CHECK-NOUSE-I386-NEXT: [[TMP25:%.*]] = getelementptr inbounds [7 x i64], [7 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK-NOUSE-I386-NEXT: store i64 4, i64* [[TMP25]], align 4 -// CHECK-NOUSE-I386-NEXT: [[TMP26:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK-NOUSE-I386-NEXT: store i8* null, i8** [[TMP26]], align 4 -// CHECK-NOUSE-I386-NEXT: [[TMP27:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 -// CHECK-NOUSE-I386-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to %struct.ST** -// CHECK-NOUSE-I386-NEXT: store %struct.ST* [[ST1]], %struct.ST** [[TMP28]], align 4 -// CHECK-NOUSE-I386-NEXT: [[TMP29:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK-NOUSE-I386-NEXT: [[TMP30:%.*]] = bitcast i8** [[TMP29]] to i32** -// CHECK-NOUSE-I386-NEXT: store i32* [[J]], i32** [[TMP30]], align 4 -// CHECK-NOUSE-I386-NEXT: [[TMP31:%.*]] = getelementptr inbounds [7 x i64], [7 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK-NOUSE-I386-NEXT: store i64 4, i64* [[TMP31]], align 4 -// CHECK-NOUSE-I386-NEXT: [[TMP32:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK-NOUSE-I386-NEXT: store i8* null, i8** [[TMP32]], align 4 -// CHECK-NOUSE-I386-NEXT: [[TMP33:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK-NOUSE-I386-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i32** -// CHECK-NOUSE-I386-NEXT: store i32* [[A]], i32** [[TMP34]], align 4 -// CHECK-NOUSE-I386-NEXT: [[TMP35:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK-NOUSE-I386-NEXT: [[TMP36:%.*]] = bitcast i8** [[TMP35]] to i32** -// CHECK-NOUSE-I386-NEXT: store i32* [[A]], i32** [[TMP36]], align 4 -// CHECK-NOUSE-I386-NEXT: [[TMP37:%.*]] = getelementptr inbounds [7 x i64], [7 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK-NOUSE-I386-NEXT: store i64 4, i64* [[TMP37]], align 4 -// CHECK-NOUSE-I386-NEXT: [[TMP38:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 -// CHECK-NOUSE-I386-NEXT: store i8* null, i8** [[TMP38]], align 4 -// CHECK-NOUSE-I386-NEXT: [[TMP39:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK-NOUSE-I386-NEXT: [[TMP40:%.*]] = bitcast i8** [[TMP39]] to 
%struct.ST** -// CHECK-NOUSE-I386-NEXT: store %struct.ST* [[ST2]], %struct.ST** [[TMP40]], align 4 -// CHECK-NOUSE-I386-NEXT: [[TMP41:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK-NOUSE-I386-NEXT: [[TMP42:%.*]] = bitcast i8** [[TMP41]] to i32** -// CHECK-NOUSE-I386-NEXT: store i32* [[I1]], i32** [[TMP42]], align 4 -// CHECK-NOUSE-I386-NEXT: [[TMP43:%.*]] = getelementptr inbounds [7 x i64], [7 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK-NOUSE-I386-NEXT: store i64 [[TMP14]], i64* [[TMP43]], align 4 -// CHECK-NOUSE-I386-NEXT: [[TMP44:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 +// CHECK-NOUSE-I386-NEXT: store i64 [[TMP7]], i64* getelementptr inbounds ([7 x i64], [7 x i64]* @.offload_sizes, i32 0, i32 0), align 4 +// CHECK-NOUSE-I386-NEXT: [[TMP19:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK-NOUSE-I386-NEXT: store i8* null, i8** [[TMP19]], align 4 +// CHECK-NOUSE-I386-NEXT: [[TMP20:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK-NOUSE-I386-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to %struct.ST** +// CHECK-NOUSE-I386-NEXT: store %struct.ST* [[ST1]], %struct.ST** [[TMP21]], align 4 +// CHECK-NOUSE-I386-NEXT: [[TMP22:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK-NOUSE-I386-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i32** +// CHECK-NOUSE-I386-NEXT: store i32* [[I]], i32** [[TMP23]], align 4 +// CHECK-NOUSE-I386-NEXT: [[TMP24:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK-NOUSE-I386-NEXT: store i8* null, i8** [[TMP24]], align 4 +// CHECK-NOUSE-I386-NEXT: [[TMP25:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK-NOUSE-I386-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to %struct.ST** +// CHECK-NOUSE-I386-NEXT: store %struct.ST* [[ST1]], %struct.ST** [[TMP26]], align 4 +// CHECK-NOUSE-I386-NEXT: [[TMP27:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 +// CHECK-NOUSE-I386-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i32** +// CHECK-NOUSE-I386-NEXT: store i32* [[J]], i32** [[TMP28]], align 4 +// CHECK-NOUSE-I386-NEXT: [[TMP29:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK-NOUSE-I386-NEXT: store i8* null, i8** [[TMP29]], align 4 +// CHECK-NOUSE-I386-NEXT: [[TMP30:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK-NOUSE-I386-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i32** +// CHECK-NOUSE-I386-NEXT: store i32* [[A]], i32** [[TMP31]], align 4 +// CHECK-NOUSE-I386-NEXT: [[TMP32:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK-NOUSE-I386-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i32** +// CHECK-NOUSE-I386-NEXT: store i32* [[A]], i32** [[TMP33]], align 4 +// CHECK-NOUSE-I386-NEXT: [[TMP34:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 +// CHECK-NOUSE-I386-NEXT: store i8* null, i8** [[TMP34]], align 4 +// CHECK-NOUSE-I386-NEXT: [[TMP35:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK-NOUSE-I386-NEXT: [[TMP36:%.*]] = bitcast i8** [[TMP35]] to %struct.ST** +// CHECK-NOUSE-I386-NEXT: store %struct.ST* [[ST2]], %struct.ST** [[TMP36]], align 4 +// 
CHECK-NOUSE-I386-NEXT: [[TMP37:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK-NOUSE-I386-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i32** +// CHECK-NOUSE-I386-NEXT: store i32* [[I1]], i32** [[TMP38]], align 4 +// CHECK-NOUSE-I386-NEXT: store i64 [[TMP14]], i64* getelementptr inbounds ([7 x i64], [7 x i64]* @.offload_sizes, i32 0, i32 4), align 4 +// CHECK-NOUSE-I386-NEXT: [[TMP39:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 +// CHECK-NOUSE-I386-NEXT: store i8* null, i8** [[TMP39]], align 4 +// CHECK-NOUSE-I386-NEXT: [[TMP40:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 5 +// CHECK-NOUSE-I386-NEXT: [[TMP41:%.*]] = bitcast i8** [[TMP40]] to %struct.ST** +// CHECK-NOUSE-I386-NEXT: store %struct.ST* [[ST2]], %struct.ST** [[TMP41]], align 4 +// CHECK-NOUSE-I386-NEXT: [[TMP42:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 5 +// CHECK-NOUSE-I386-NEXT: [[TMP43:%.*]] = bitcast i8** [[TMP42]] to i32** +// CHECK-NOUSE-I386-NEXT: store i32* [[I1]], i32** [[TMP43]], align 4 +// CHECK-NOUSE-I386-NEXT: [[TMP44:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 5 // CHECK-NOUSE-I386-NEXT: store i8* null, i8** [[TMP44]], align 4 -// CHECK-NOUSE-I386-NEXT: [[TMP45:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 5 +// CHECK-NOUSE-I386-NEXT: [[TMP45:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 6 // CHECK-NOUSE-I386-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to %struct.ST** // CHECK-NOUSE-I386-NEXT: store %struct.ST* [[ST2]], %struct.ST** [[TMP46]], align 4 -// CHECK-NOUSE-I386-NEXT: [[TMP47:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 5 +// CHECK-NOUSE-I386-NEXT: [[TMP47:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 6 // CHECK-NOUSE-I386-NEXT: [[TMP48:%.*]] = bitcast i8** [[TMP47]] to i32** -// CHECK-NOUSE-I386-NEXT: store i32* [[I1]], i32** [[TMP48]], align 4 -// CHECK-NOUSE-I386-NEXT: [[TMP49:%.*]] = getelementptr inbounds [7 x i64], [7 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5 -// CHECK-NOUSE-I386-NEXT: store i64 4, i64* [[TMP49]], align 4 -// CHECK-NOUSE-I386-NEXT: [[TMP50:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 5 -// CHECK-NOUSE-I386-NEXT: store i8* null, i8** [[TMP50]], align 4 -// CHECK-NOUSE-I386-NEXT: [[TMP51:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 6 -// CHECK-NOUSE-I386-NEXT: [[TMP52:%.*]] = bitcast i8** [[TMP51]] to %struct.ST** -// CHECK-NOUSE-I386-NEXT: store %struct.ST* [[ST2]], %struct.ST** [[TMP52]], align 4 -// CHECK-NOUSE-I386-NEXT: [[TMP53:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 6 -// CHECK-NOUSE-I386-NEXT: [[TMP54:%.*]] = bitcast i8** [[TMP53]] to i32** -// CHECK-NOUSE-I386-NEXT: store i32* [[J2]], i32** [[TMP54]], align 4 -// CHECK-NOUSE-I386-NEXT: [[TMP55:%.*]] = getelementptr inbounds [7 x i64], [7 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6 -// CHECK-NOUSE-I386-NEXT: store i64 4, i64* [[TMP55]], align 4 -// CHECK-NOUSE-I386-NEXT: [[TMP56:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 6 -// CHECK-NOUSE-I386-NEXT: store i8* null, i8** [[TMP56]], align 4 -// CHECK-NOUSE-I386-NEXT: [[TMP57:%.*]] = getelementptr inbounds [7 x i8*], [7 
x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK-NOUSE-I386-NEXT: [[TMP58:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK-NOUSE-I386-NEXT: [[TMP59:%.*]] = getelementptr inbounds [7 x i64], [7 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK-NOUSE-I386-NEXT: [[TMP60:%.*]] = call i32 @__tgt_target_mapper(%struct.ident_t* @[[GLOB1:[0-9]+]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z20explicit_maps_singlei_l654.region_id, i32 7, i8** [[TMP57]], i8** [[TMP58]], i64* [[TMP59]], i64* getelementptr inbounds ([7 x i64], [7 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null) -// CHECK-NOUSE-I386-NEXT: [[TMP61:%.*]] = icmp ne i32 [[TMP60]], 0 -// CHECK-NOUSE-I386-NEXT: br i1 [[TMP61]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK-NOUSE-I386-NEXT: store i32* [[J2]], i32** [[TMP48]], align 4 +// CHECK-NOUSE-I386-NEXT: [[TMP49:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 6 +// CHECK-NOUSE-I386-NEXT: store i8* null, i8** [[TMP49]], align 4 +// CHECK-NOUSE-I386-NEXT: [[TMP50:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK-NOUSE-I386-NEXT: [[TMP51:%.*]] = getelementptr inbounds [7 x i8*], [7 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK-NOUSE-I386-NEXT: [[TMP52:%.*]] = call i32 @__tgt_target_mapper(%struct.ident_t* @[[GLOB1:[0-9]+]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z20explicit_maps_singlei_l606.region_id, i32 7, i8** [[TMP50]], i8** [[TMP51]], i64* getelementptr inbounds ([7 x i64], [7 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([7 x i64], [7 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null) +// CHECK-NOUSE-I386-NEXT: [[TMP53:%.*]] = icmp ne i32 [[TMP52]], 0 +// CHECK-NOUSE-I386-NEXT: br i1 [[TMP53]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK-NOUSE-I386: omp_offload.failed: -// CHECK-NOUSE-I386-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z20explicit_maps_singlei_l654() #[[ATTR2:[0-9]+]] +// CHECK-NOUSE-I386-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z20explicit_maps_singlei_l606() #[[ATTR2:[0-9]+]] // CHECK-NOUSE-I386-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK-NOUSE-I386: omp_offload.cont: -// CHECK-NOUSE-I386-NEXT: [[TMP62:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0 -// CHECK-NOUSE-I386-NEXT: [[TMP63:%.*]] = bitcast i8** [[TMP62]] to i32** -// CHECK-NOUSE-I386-NEXT: store i32* [[A]], i32** [[TMP63]], align 4 -// CHECK-NOUSE-I386-NEXT: [[TMP64:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS4]], i32 0, i32 0 -// CHECK-NOUSE-I386-NEXT: [[TMP65:%.*]] = bitcast i8** [[TMP64]] to i32** -// CHECK-NOUSE-I386-NEXT: store i32* [[A]], i32** [[TMP65]], align 4 -// CHECK-NOUSE-I386-NEXT: [[TMP66:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS5]], i32 0, i32 0 -// CHECK-NOUSE-I386-NEXT: store i8* null, i8** [[TMP66]], align 4 -// CHECK-NOUSE-I386-NEXT: [[TMP67:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0 -// CHECK-NOUSE-I386-NEXT: [[TMP68:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS4]], i32 0, i32 0 -// CHECK-NOUSE-I386-NEXT: [[TMP69:%.*]] = call i32 @__tgt_target_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z20explicit_maps_singlei_l668.region_id, i32 1, i8** 
[[TMP67]], i8** [[TMP68]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.1, i32 0, i32 0), i8** null, i8** null) -// CHECK-NOUSE-I386-NEXT: [[TMP70:%.*]] = icmp ne i32 [[TMP69]], 0 -// CHECK-NOUSE-I386-NEXT: br i1 [[TMP70]], label [[OMP_OFFLOAD_FAILED6:%.*]], label [[OMP_OFFLOAD_CONT7:%.*]] +// CHECK-NOUSE-I386-NEXT: [[TMP54:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0 +// CHECK-NOUSE-I386-NEXT: [[TMP55:%.*]] = bitcast i8** [[TMP54]] to i32** +// CHECK-NOUSE-I386-NEXT: store i32* [[A]], i32** [[TMP55]], align 4 +// CHECK-NOUSE-I386-NEXT: [[TMP56:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS4]], i32 0, i32 0 +// CHECK-NOUSE-I386-NEXT: [[TMP57:%.*]] = bitcast i8** [[TMP56]] to i32** +// CHECK-NOUSE-I386-NEXT: store i32* [[A]], i32** [[TMP57]], align 4 +// CHECK-NOUSE-I386-NEXT: [[TMP58:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS5]], i32 0, i32 0 +// CHECK-NOUSE-I386-NEXT: store i8* null, i8** [[TMP58]], align 4 +// CHECK-NOUSE-I386-NEXT: [[TMP59:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS3]], i32 0, i32 0 +// CHECK-NOUSE-I386-NEXT: [[TMP60:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS4]], i32 0, i32 0 +// CHECK-NOUSE-I386-NEXT: [[TMP61:%.*]] = call i32 @__tgt_target_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z20explicit_maps_singlei_l620.region_id, i32 1, i8** [[TMP59]], i8** [[TMP60]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.1, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.2, i32 0, i32 0), i8** null, i8** null) +// CHECK-NOUSE-I386-NEXT: [[TMP62:%.*]] = icmp ne i32 [[TMP61]], 0 +// CHECK-NOUSE-I386-NEXT: br i1 [[TMP62]], label [[OMP_OFFLOAD_FAILED6:%.*]], label [[OMP_OFFLOAD_CONT7:%.*]] // CHECK-NOUSE-I386: omp_offload.failed6: -// CHECK-NOUSE-I386-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z20explicit_maps_singlei_l668() #[[ATTR2]] +// CHECK-NOUSE-I386-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z20explicit_maps_singlei_l620() #[[ATTR2]] // CHECK-NOUSE-I386-NEXT: br label [[OMP_OFFLOAD_CONT7]] // CHECK-NOUSE-I386: omp_offload.cont7: // CHECK-NOUSE-I386-NEXT: ret void @@ -679,7 +631,6 @@ // CHECK-USE-PPC64LE-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK-USE-PPC64LE-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK-USE-PPC64LE-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8 -// CHECK-USE-PPC64LE-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 8 // CHECK-USE-PPC64LE-NEXT: store %struct.ST* [[THIS:%.*]], %struct.ST** [[THIS_ADDR]], align 8 // CHECK-USE-PPC64LE-NEXT: [[THIS1:%.*]] = load %struct.ST*, %struct.ST** [[THIS_ADDR]], align 8 // CHECK-USE-PPC64LE-NEXT: [[I:%.*]] = getelementptr inbounds [[STRUCT_ST:%.*]], %struct.ST* [[THIS1]], i32 0, i32 0 @@ -697,38 +648,32 @@ // CHECK-USE-PPC64LE-NEXT: [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK-USE-PPC64LE-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to i32** // CHECK-USE-PPC64LE-NEXT: store i32* [[I]], i32** [[TMP10]], align 8 -// CHECK-USE-PPC64LE-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK-USE-PPC64LE-NEXT: store i64 [[TMP6]], i64* [[TMP11]], align 8 -// 
CHECK-USE-PPC64LE-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK-USE-PPC64LE-NEXT: store i8* null, i8** [[TMP12]], align 8 -// CHECK-USE-PPC64LE-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK-USE-PPC64LE-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to %struct.ST** -// CHECK-USE-PPC64LE-NEXT: store %struct.ST* [[THIS1]], %struct.ST** [[TMP14]], align 8 -// CHECK-USE-PPC64LE-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK-USE-PPC64LE-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i32** -// CHECK-USE-PPC64LE-NEXT: store i32* [[I]], i32** [[TMP16]], align 8 -// CHECK-USE-PPC64LE-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK-USE-PPC64LE-NEXT: store i64 4, i64* [[TMP17]], align 8 -// CHECK-USE-PPC64LE-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK-USE-PPC64LE-NEXT: store i8* null, i8** [[TMP18]], align 8 -// CHECK-USE-PPC64LE-NEXT: [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 -// CHECK-USE-PPC64LE-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to %struct.ST** -// CHECK-USE-PPC64LE-NEXT: store %struct.ST* [[THIS1]], %struct.ST** [[TMP20]], align 8 -// CHECK-USE-PPC64LE-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK-USE-PPC64LE-NEXT: [[TMP22:%.*]] = bitcast i8** [[TMP21]] to i32** -// CHECK-USE-PPC64LE-NEXT: store i32* [[J]], i32** [[TMP22]], align 8 -// CHECK-USE-PPC64LE-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK-USE-PPC64LE-NEXT: store i64 4, i64* [[TMP23]], align 8 -// CHECK-USE-PPC64LE-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK-USE-PPC64LE-NEXT: store i8* null, i8** [[TMP24]], align 8 -// CHECK-USE-PPC64LE-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK-USE-PPC64LE-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK-USE-PPC64LE-NEXT: [[TMP27:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK-USE-PPC64LE-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2ST20test_present_membersEv_l919.region_id, i32 3, i8** [[TMP25]], i8** [[TMP26]], i64* [[TMP27]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.2, i32 0, i32 0), i8** null, i8** null) -// CHECK-USE-PPC64LE-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0 -// CHECK-USE-PPC64LE-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK-USE-PPC64LE-NEXT: store i64 [[TMP6]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.3, i32 0, i32 0), align 8 +// CHECK-USE-PPC64LE-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK-USE-PPC64LE-NEXT: store i8* null, i8** [[TMP11]], align 8 +// CHECK-USE-PPC64LE-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK-USE-PPC64LE-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to 
%struct.ST** +// CHECK-USE-PPC64LE-NEXT: store %struct.ST* [[THIS1]], %struct.ST** [[TMP13]], align 8 +// CHECK-USE-PPC64LE-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK-USE-PPC64LE-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32** +// CHECK-USE-PPC64LE-NEXT: store i32* [[I]], i32** [[TMP15]], align 8 +// CHECK-USE-PPC64LE-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK-USE-PPC64LE-NEXT: store i8* null, i8** [[TMP16]], align 8 +// CHECK-USE-PPC64LE-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK-USE-PPC64LE-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to %struct.ST** +// CHECK-USE-PPC64LE-NEXT: store %struct.ST* [[THIS1]], %struct.ST** [[TMP18]], align 8 +// CHECK-USE-PPC64LE-NEXT: [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 +// CHECK-USE-PPC64LE-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i32** +// CHECK-USE-PPC64LE-NEXT: store i32* [[J]], i32** [[TMP20]], align 8 +// CHECK-USE-PPC64LE-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK-USE-PPC64LE-NEXT: store i8* null, i8** [[TMP21]], align 8 +// CHECK-USE-PPC64LE-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK-USE-PPC64LE-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK-USE-PPC64LE-NEXT: [[TMP24:%.*]] = call i32 @__tgt_target_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2ST20test_present_membersEv_l843.region_id, i32 3, i8** [[TMP22]], i8** [[TMP23]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.3, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null) +// CHECK-USE-PPC64LE-NEXT: [[TMP25:%.*]] = icmp ne i32 [[TMP24]], 0 +// CHECK-USE-PPC64LE-NEXT: br i1 [[TMP25]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK-USE-PPC64LE: omp_offload.failed: -// CHECK-USE-PPC64LE-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2ST20test_present_membersEv_l919(%struct.ST* [[THIS1]]) #[[ATTR2]] +// CHECK-USE-PPC64LE-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2ST20test_present_membersEv_l843(%struct.ST* [[THIS1]]) #[[ATTR2]] // CHECK-USE-PPC64LE-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK-USE-PPC64LE: omp_offload.cont: // CHECK-USE-PPC64LE-NEXT: ret void @@ -739,7 +684,6 @@ // CHECK-USE-I386-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK-USE-I386-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK-USE-I386-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4 -// CHECK-USE-I386-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 4 // CHECK-USE-I386-NEXT: store %struct.ST* [[THIS:%.*]], %struct.ST** [[THIS_ADDR]], align 4 // CHECK-USE-I386-NEXT: [[THIS1:%.*]] = load %struct.ST*, %struct.ST** [[THIS_ADDR]], align 4 // CHECK-USE-I386-NEXT: [[I:%.*]] = getelementptr inbounds [[STRUCT_ST:%.*]], %struct.ST* [[THIS1]], i32 0, i32 0 @@ -757,38 +701,32 @@ // CHECK-USE-I386-NEXT: [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK-USE-I386-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to i32** // CHECK-USE-I386-NEXT: store i32* 
[[I]], i32** [[TMP10]], align 4 -// CHECK-USE-I386-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK-USE-I386-NEXT: store i64 [[TMP6]], i64* [[TMP11]], align 4 -// CHECK-USE-I386-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK-USE-I386-NEXT: store i8* null, i8** [[TMP12]], align 4 -// CHECK-USE-I386-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK-USE-I386-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to %struct.ST** -// CHECK-USE-I386-NEXT: store %struct.ST* [[THIS1]], %struct.ST** [[TMP14]], align 4 -// CHECK-USE-I386-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK-USE-I386-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i32** -// CHECK-USE-I386-NEXT: store i32* [[I]], i32** [[TMP16]], align 4 -// CHECK-USE-I386-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK-USE-I386-NEXT: store i64 4, i64* [[TMP17]], align 4 -// CHECK-USE-I386-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK-USE-I386-NEXT: store i8* null, i8** [[TMP18]], align 4 -// CHECK-USE-I386-NEXT: [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 -// CHECK-USE-I386-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to %struct.ST** -// CHECK-USE-I386-NEXT: store %struct.ST* [[THIS1]], %struct.ST** [[TMP20]], align 4 -// CHECK-USE-I386-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK-USE-I386-NEXT: [[TMP22:%.*]] = bitcast i8** [[TMP21]] to i32** -// CHECK-USE-I386-NEXT: store i32* [[J]], i32** [[TMP22]], align 4 -// CHECK-USE-I386-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK-USE-I386-NEXT: store i64 4, i64* [[TMP23]], align 4 -// CHECK-USE-I386-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK-USE-I386-NEXT: store i8* null, i8** [[TMP24]], align 4 -// CHECK-USE-I386-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK-USE-I386-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK-USE-I386-NEXT: [[TMP27:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK-USE-I386-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2ST20test_present_membersEv_l919.region_id, i32 3, i8** [[TMP25]], i8** [[TMP26]], i64* [[TMP27]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.2, i32 0, i32 0), i8** null, i8** null) -// CHECK-USE-I386-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0 -// CHECK-USE-I386-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK-USE-I386-NEXT: store i64 [[TMP6]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.3, i32 0, i32 0), align 4 +// CHECK-USE-I386-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK-USE-I386-NEXT: store i8* null, i8** [[TMP11]], align 4 +// CHECK-USE-I386-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x 
i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK-USE-I386-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to %struct.ST** +// CHECK-USE-I386-NEXT: store %struct.ST* [[THIS1]], %struct.ST** [[TMP13]], align 4 +// CHECK-USE-I386-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK-USE-I386-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32** +// CHECK-USE-I386-NEXT: store i32* [[I]], i32** [[TMP15]], align 4 +// CHECK-USE-I386-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK-USE-I386-NEXT: store i8* null, i8** [[TMP16]], align 4 +// CHECK-USE-I386-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK-USE-I386-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to %struct.ST** +// CHECK-USE-I386-NEXT: store %struct.ST* [[THIS1]], %struct.ST** [[TMP18]], align 4 +// CHECK-USE-I386-NEXT: [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 +// CHECK-USE-I386-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i32** +// CHECK-USE-I386-NEXT: store i32* [[J]], i32** [[TMP20]], align 4 +// CHECK-USE-I386-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK-USE-I386-NEXT: store i8* null, i8** [[TMP21]], align 4 +// CHECK-USE-I386-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK-USE-I386-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK-USE-I386-NEXT: [[TMP24:%.*]] = call i32 @__tgt_target_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2ST20test_present_membersEv_l843.region_id, i32 3, i8** [[TMP22]], i8** [[TMP23]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.3, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null) +// CHECK-USE-I386-NEXT: [[TMP25:%.*]] = icmp ne i32 [[TMP24]], 0 +// CHECK-USE-I386-NEXT: br i1 [[TMP25]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK-USE-I386: omp_offload.failed: -// CHECK-USE-I386-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2ST20test_present_membersEv_l919(%struct.ST* [[THIS1]]) #[[ATTR2]] +// CHECK-USE-I386-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2ST20test_present_membersEv_l843(%struct.ST* [[THIS1]]) #[[ATTR2]] // CHECK-USE-I386-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK-USE-I386: omp_offload.cont: // CHECK-USE-I386-NEXT: ret void @@ -799,7 +737,6 @@ // CHECK-NOUSE-PPC64LE-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK-NOUSE-PPC64LE-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK-NOUSE-PPC64LE-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8 -// CHECK-NOUSE-PPC64LE-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 8 // CHECK-NOUSE-PPC64LE-NEXT: store %struct.ST* [[THIS:%.*]], %struct.ST** [[THIS_ADDR]], align 8 // CHECK-NOUSE-PPC64LE-NEXT: [[THIS1:%.*]] = load %struct.ST*, %struct.ST** [[THIS_ADDR]], align 8 // CHECK-NOUSE-PPC64LE-NEXT: [[I:%.*]] = getelementptr inbounds [[STRUCT_ST:%.*]], %struct.ST* [[THIS1]], i32 0, i32 0 @@ -817,38 +754,32 @@ // CHECK-NOUSE-PPC64LE-NEXT: [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // 
CHECK-NOUSE-PPC64LE-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to i32** // CHECK-NOUSE-PPC64LE-NEXT: store i32* [[I]], i32** [[TMP10]], align 8 -// CHECK-NOUSE-PPC64LE-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK-NOUSE-PPC64LE-NEXT: store i64 [[TMP6]], i64* [[TMP11]], align 8 -// CHECK-NOUSE-PPC64LE-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK-NOUSE-PPC64LE-NEXT: store i8* null, i8** [[TMP12]], align 8 -// CHECK-NOUSE-PPC64LE-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK-NOUSE-PPC64LE-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to %struct.ST** -// CHECK-NOUSE-PPC64LE-NEXT: store %struct.ST* [[THIS1]], %struct.ST** [[TMP14]], align 8 -// CHECK-NOUSE-PPC64LE-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK-NOUSE-PPC64LE-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i32** -// CHECK-NOUSE-PPC64LE-NEXT: store i32* [[I]], i32** [[TMP16]], align 8 -// CHECK-NOUSE-PPC64LE-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK-NOUSE-PPC64LE-NEXT: store i64 4, i64* [[TMP17]], align 8 -// CHECK-NOUSE-PPC64LE-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK-NOUSE-PPC64LE-NEXT: store i8* null, i8** [[TMP18]], align 8 -// CHECK-NOUSE-PPC64LE-NEXT: [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 -// CHECK-NOUSE-PPC64LE-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to %struct.ST** -// CHECK-NOUSE-PPC64LE-NEXT: store %struct.ST* [[THIS1]], %struct.ST** [[TMP20]], align 8 -// CHECK-NOUSE-PPC64LE-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK-NOUSE-PPC64LE-NEXT: [[TMP22:%.*]] = bitcast i8** [[TMP21]] to i32** -// CHECK-NOUSE-PPC64LE-NEXT: store i32* [[J]], i32** [[TMP22]], align 8 -// CHECK-NOUSE-PPC64LE-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK-NOUSE-PPC64LE-NEXT: store i64 4, i64* [[TMP23]], align 8 -// CHECK-NOUSE-PPC64LE-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK-NOUSE-PPC64LE-NEXT: store i8* null, i8** [[TMP24]], align 8 -// CHECK-NOUSE-PPC64LE-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK-NOUSE-PPC64LE-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK-NOUSE-PPC64LE-NEXT: [[TMP27:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK-NOUSE-PPC64LE-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2ST20test_present_membersEv_l919.region_id, i32 3, i8** [[TMP25]], i8** [[TMP26]], i64* [[TMP27]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.2, i32 0, i32 0), i8** null, i8** null) -// CHECK-NOUSE-PPC64LE-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0 -// CHECK-NOUSE-PPC64LE-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK-NOUSE-PPC64LE-NEXT: store i64 [[TMP6]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.3, i32 0, i32 
0), align 8 +// CHECK-NOUSE-PPC64LE-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK-NOUSE-PPC64LE-NEXT: store i8* null, i8** [[TMP11]], align 8 +// CHECK-NOUSE-PPC64LE-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK-NOUSE-PPC64LE-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to %struct.ST** +// CHECK-NOUSE-PPC64LE-NEXT: store %struct.ST* [[THIS1]], %struct.ST** [[TMP13]], align 8 +// CHECK-NOUSE-PPC64LE-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK-NOUSE-PPC64LE-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32** +// CHECK-NOUSE-PPC64LE-NEXT: store i32* [[I]], i32** [[TMP15]], align 8 +// CHECK-NOUSE-PPC64LE-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK-NOUSE-PPC64LE-NEXT: store i8* null, i8** [[TMP16]], align 8 +// CHECK-NOUSE-PPC64LE-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK-NOUSE-PPC64LE-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to %struct.ST** +// CHECK-NOUSE-PPC64LE-NEXT: store %struct.ST* [[THIS1]], %struct.ST** [[TMP18]], align 8 +// CHECK-NOUSE-PPC64LE-NEXT: [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 +// CHECK-NOUSE-PPC64LE-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i32** +// CHECK-NOUSE-PPC64LE-NEXT: store i32* [[J]], i32** [[TMP20]], align 8 +// CHECK-NOUSE-PPC64LE-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK-NOUSE-PPC64LE-NEXT: store i8* null, i8** [[TMP21]], align 8 +// CHECK-NOUSE-PPC64LE-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK-NOUSE-PPC64LE-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK-NOUSE-PPC64LE-NEXT: [[TMP24:%.*]] = call i32 @__tgt_target_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2ST20test_present_membersEv_l843.region_id, i32 3, i8** [[TMP22]], i8** [[TMP23]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.3, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null) +// CHECK-NOUSE-PPC64LE-NEXT: [[TMP25:%.*]] = icmp ne i32 [[TMP24]], 0 +// CHECK-NOUSE-PPC64LE-NEXT: br i1 [[TMP25]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK-NOUSE-PPC64LE: omp_offload.failed: -// CHECK-NOUSE-PPC64LE-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2ST20test_present_membersEv_l919() #[[ATTR2]] +// CHECK-NOUSE-PPC64LE-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2ST20test_present_membersEv_l843() #[[ATTR2]] // CHECK-NOUSE-PPC64LE-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK-NOUSE-PPC64LE: omp_offload.cont: // CHECK-NOUSE-PPC64LE-NEXT: ret void @@ -859,7 +790,6 @@ // CHECK-NOUSE-I386-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK-NOUSE-I386-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK-NOUSE-I386-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4 -// CHECK-NOUSE-I386-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 4 // CHECK-NOUSE-I386-NEXT: store %struct.ST* [[THIS:%.*]], %struct.ST** [[THIS_ADDR]], align 4 // CHECK-NOUSE-I386-NEXT: 
[[THIS1:%.*]] = load %struct.ST*, %struct.ST** [[THIS_ADDR]], align 4 // CHECK-NOUSE-I386-NEXT: [[I:%.*]] = getelementptr inbounds [[STRUCT_ST:%.*]], %struct.ST* [[THIS1]], i32 0, i32 0 @@ -877,38 +807,32 @@ // CHECK-NOUSE-I386-NEXT: [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK-NOUSE-I386-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to i32** // CHECK-NOUSE-I386-NEXT: store i32* [[I]], i32** [[TMP10]], align 4 -// CHECK-NOUSE-I386-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK-NOUSE-I386-NEXT: store i64 [[TMP6]], i64* [[TMP11]], align 4 -// CHECK-NOUSE-I386-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK-NOUSE-I386-NEXT: store i8* null, i8** [[TMP12]], align 4 -// CHECK-NOUSE-I386-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK-NOUSE-I386-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to %struct.ST** -// CHECK-NOUSE-I386-NEXT: store %struct.ST* [[THIS1]], %struct.ST** [[TMP14]], align 4 -// CHECK-NOUSE-I386-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK-NOUSE-I386-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i32** -// CHECK-NOUSE-I386-NEXT: store i32* [[I]], i32** [[TMP16]], align 4 -// CHECK-NOUSE-I386-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK-NOUSE-I386-NEXT: store i64 4, i64* [[TMP17]], align 4 -// CHECK-NOUSE-I386-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK-NOUSE-I386-NEXT: store i8* null, i8** [[TMP18]], align 4 -// CHECK-NOUSE-I386-NEXT: [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 -// CHECK-NOUSE-I386-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to %struct.ST** -// CHECK-NOUSE-I386-NEXT: store %struct.ST* [[THIS1]], %struct.ST** [[TMP20]], align 4 -// CHECK-NOUSE-I386-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK-NOUSE-I386-NEXT: [[TMP22:%.*]] = bitcast i8** [[TMP21]] to i32** -// CHECK-NOUSE-I386-NEXT: store i32* [[J]], i32** [[TMP22]], align 4 -// CHECK-NOUSE-I386-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK-NOUSE-I386-NEXT: store i64 4, i64* [[TMP23]], align 4 -// CHECK-NOUSE-I386-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK-NOUSE-I386-NEXT: store i8* null, i8** [[TMP24]], align 4 -// CHECK-NOUSE-I386-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK-NOUSE-I386-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK-NOUSE-I386-NEXT: [[TMP27:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK-NOUSE-I386-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2ST20test_present_membersEv_l919.region_id, i32 3, i8** [[TMP25]], i8** [[TMP26]], i64* [[TMP27]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.2, i32 0, i32 0), i8** null, i8** null) -// CHECK-NOUSE-I386-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0 -// 
CHECK-NOUSE-I386-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK-NOUSE-I386-NEXT: store i64 [[TMP6]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.3, i32 0, i32 0), align 4 +// CHECK-NOUSE-I386-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK-NOUSE-I386-NEXT: store i8* null, i8** [[TMP11]], align 4 +// CHECK-NOUSE-I386-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK-NOUSE-I386-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to %struct.ST** +// CHECK-NOUSE-I386-NEXT: store %struct.ST* [[THIS1]], %struct.ST** [[TMP13]], align 4 +// CHECK-NOUSE-I386-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK-NOUSE-I386-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32** +// CHECK-NOUSE-I386-NEXT: store i32* [[I]], i32** [[TMP15]], align 4 +// CHECK-NOUSE-I386-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK-NOUSE-I386-NEXT: store i8* null, i8** [[TMP16]], align 4 +// CHECK-NOUSE-I386-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK-NOUSE-I386-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to %struct.ST** +// CHECK-NOUSE-I386-NEXT: store %struct.ST* [[THIS1]], %struct.ST** [[TMP18]], align 4 +// CHECK-NOUSE-I386-NEXT: [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 +// CHECK-NOUSE-I386-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i32** +// CHECK-NOUSE-I386-NEXT: store i32* [[J]], i32** [[TMP20]], align 4 +// CHECK-NOUSE-I386-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK-NOUSE-I386-NEXT: store i8* null, i8** [[TMP21]], align 4 +// CHECK-NOUSE-I386-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK-NOUSE-I386-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK-NOUSE-I386-NEXT: [[TMP24:%.*]] = call i32 @__tgt_target_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2ST20test_present_membersEv_l843.region_id, i32 3, i8** [[TMP22]], i8** [[TMP23]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.3, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null) +// CHECK-NOUSE-I386-NEXT: [[TMP25:%.*]] = icmp ne i32 [[TMP24]], 0 +// CHECK-NOUSE-I386-NEXT: br i1 [[TMP25]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK-NOUSE-I386: omp_offload.failed: -// CHECK-NOUSE-I386-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2ST20test_present_membersEv_l919() #[[ATTR2]] +// CHECK-NOUSE-I386-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2ST20test_present_membersEv_l843() #[[ATTR2]] // CHECK-NOUSE-I386-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK-NOUSE-I386: omp_offload.cont: // CHECK-NOUSE-I386-NEXT: ret void diff --git a/clang/test/OpenMP/target_map_member_expr_array_section_codegen.cpp b/clang/test/OpenMP/target_map_member_expr_array_section_codegen.cpp --- a/clang/test/OpenMP/target_map_member_expr_array_section_codegen.cpp +++ b/clang/test/OpenMP/target_map_member_expr_array_section_codegen.cpp @@ -5,9 +5,11 @@ 
 #ifndef HEADER
 #define HEADER

+// CHECK: [[SIZE_ENTER:@.+]] = private unnamed_addr global [2 x i64] [i64 0, i64 24]
 // 0 = OMP_MAP_NONE
 // 281474976710656 = 0x1000000000000 = OMP_MAP_MEMBER_OF of 1-st element
 // CHECK: [[MAP_ENTER:@.+]] = private unnamed_addr constant [2 x i64] [i64 0, i64 281474976710656]
+// CHECK: [[SIZE_EXIT:@.+]] = private unnamed_addr global [2 x i64] [i64 0, i64 24]
 // 281474976710664 = 0x1000000000008 = OMP_MAP_MEMBER_OF of 1-st element | OMP_MAP_DELETE
 // CHECK: [[MAP_EXIT:@.+]] = private unnamed_addr constant [2 x i64] [i64 0, i64 281474976710664]
 template
@@ -22,7 +24,6 @@
 maptest() {
 // CHECK: [[BPTRS:%.+]] = alloca [2 x i8*],
 // CHECK: [[PTRS:%.+]] = alloca [2 x i8*],
- // CHECK: [[SIZES:%.+]] = alloca [2 x i64],
 // CHECK: getelementptr inbounds
 // CHECK: [[S_ADDR:%.+]] = getelementptr inbounds %struct.maptest, %struct.maptest* [[THIS:%.+]], i32 0, i32 0
 // CHECK: [[S_DATA_ADDR:%.+]] = getelementptr inbounds %struct.S, %struct.S* [[S_ADDR]], i32 0, i32 0
@@ -47,27 +48,22 @@
 // CHECK: [[PTR0:%.+]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[PTRS]], i32 0, i32 0
 // CHECK: [[PTR0_DATA:%.+]] = bitcast i8** [[PTR0]] to float**
 // CHECK: store float* [[S_DATA_0_ADDR]], float** [[PTR0_DATA]],
- // CHECK: [[SIZE0:%.+]] = getelementptr inbounds [2 x i64], [2 x i64]* [[SIZES]], i32 0, i32 0
- // CHECK: store i64 [[SZ]], i64* [[SIZE0]],
+ // CHECK: store i64 [[SZ]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* [[SIZE_ENTER]], i32 0, i32 0),
 // CHECK: [[BPTR1:%.+]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[BPTRS]], i32 0, i32 1
 // CHECK: [[BPTR1_THIS:%.+]] = bitcast i8** [[BPTR1]] to %struct.maptest**
 // CHECK: store %struct.maptest* [[THIS]], %struct.maptest** [[BPTR1_THIS]],
 // CHECK: [[PTR1:%.+]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[PTRS]], i32 0, i32 1
 // CHECK: [[PTR1_DATA:%.+]] = bitcast i8** [[PTR1]] to float**
 // CHECK: store float* [[S_DATA_0_ADDR]], float** [[PTR1_DATA]],
- // CHECK: [[SIZE1:%.+]] = getelementptr inbounds [2 x i64], [2 x i64]* [[SIZES]], i32 0, i32 1
- // CHECK: store i64 24, i64* [[SIZE1]],
 // CHECK: [[BPTR:%.+]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[BPTRS]], i32 0, i32 0
 // CHECK: [[PTR:%.+]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[PTRS]], i32 0, i32 0
- // CHECK: [[SIZE:%.+]] = getelementptr inbounds [2 x i64], [2 x i64]* [[SIZES]], i32 0, i32 0
- // CHECK: call void @__tgt_target_data_begin_mapper(%struct.ident_t* @{{.+}}, i64 -1, i32 2, i8** [[BPTR]], i8** [[PTR]], i64* [[SIZE]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* [[MAP_ENTER]], i32 0, i32 0), i8** null, i8** null)
+ // CHECK: call void @__tgt_target_data_begin_mapper(%struct.ident_t* @{{.+}}, i64 -1, i32 2, i8** [[BPTR]], i8** [[PTR]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* [[SIZE_ENTER]], i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* [[MAP_ENTER]], i32 0, i32 0), i8** null, i8** null)
 #pragma omp target enter data map(alloc : s.data[:6])
 }

 ~maptest() {
 // CHECK: [[BPTRS:%.+]] = alloca [2 x i8*],
 // CHECK: [[PTRS:%.+]] = alloca [2 x i8*],
- // CHECK: [[SIZE:%.+]] = alloca [2 x i64],
 // CHECK: [[S_ADDR:%.+]] = getelementptr inbounds %struct.maptest, %struct.maptest* [[THIS:%.+]], i32 0, i32 0
 // CHECK: [[S_DATA_ADDR:%.+]] = getelementptr inbounds %struct.S, %struct.S* [[S_ADDR]], i32 0, i32 0
 // CHECK: [[S_DATA_0_ADDR:%.+]] = getelementptr inbounds [6 x float], [6 x float]* [[S_DATA_ADDR]], i64 0, i64 0
@@ -91,20 +87,16 @@
 // CHECK: [[PTR0:%.+]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[PTRS]], i32 0, i32 0
 // CHECK: [[PTR0_DATA:%.+]] = bitcast i8** [[PTR0]] to float**
 // CHECK: store float* [[S_DATA_0_ADDR]], float** [[PTR0_DATA]],
- // CHECK: [[SIZE0:%.+]] = getelementptr inbounds [2 x i64], [2 x i64]* [[SIZES]], i32 0, i32 0
- // CHECK: store i64 [[SZ]], i64* [[SIZE0]],
+ // CHECK: store i64 [[SZ]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* [[SIZE_EXIT]], i32 0, i32 0),
 // CHECK: [[BPTR1:%.+]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[BPTRS]], i32 0, i32 1
 // CHECK: [[BPTR1_THIS:%.+]] = bitcast i8** [[BPTR1]] to %struct.maptest**
 // CHECK: store %struct.maptest* [[THIS]], %struct.maptest** [[BPTR1_THIS]],
 // CHECK: [[PTR1:%.+]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[PTRS]], i32 0, i32 1
 // CHECK: [[PTR1_DATA:%.+]] = bitcast i8** [[PTR1]] to float**
 // CHECK: store float* [[S_DATA_0_ADDR]], float** [[PTR1_DATA]],
- // CHECK: [[SIZE1:%.+]] = getelementptr inbounds [2 x i64], [2 x i64]* [[SIZES]], i32 0, i32 1
- // CHECK: store i64 24, i64* [[SIZE1]],
 // CHECK: [[BPTR:%.+]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[BPTRS]], i32 0, i32 0
 // CHECK: [[PTR:%.+]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[PTRS]], i32 0, i32 0
- // CHECK: [[SIZE:%.+]] = getelementptr inbounds [2 x i64], [2 x i64]* [[SIZES]], i32 0, i32 0
- // CHECK: call void @__tgt_target_data_end_mapper(%struct.ident_t* @{{.+}}, i64 -1, i32 2, i8** [[BPTR]], i8** [[PTR]], i64* [[SIZE]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* [[MAP_EXIT]], i32 0, i32 0), i8** null, i8** null)
+ // CHECK: call void @__tgt_target_data_end_mapper(%struct.ident_t* @{{.+}}, i64 -1, i32 2, i8** [[BPTR]], i8** [[PTR]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* [[SIZE_EXIT]], i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* [[MAP_EXIT]], i32 0, i32 0), i8** null, i8** null)
 #pragma omp target exit data map(delete : s.data[:6])
 }
 };
diff --git a/clang/test/OpenMP/target_parallel_codegen.cpp b/clang/test/OpenMP/target_parallel_codegen.cpp
--- a/clang/test/OpenMP/target_parallel_codegen.cpp
+++ b/clang/test/OpenMP/target_parallel_codegen.cpp
@@ -318,7 +318,6 @@
 // CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS16:%.*]] = alloca [9 x i8*], align 8
 // CHECK1-NEXT: [[DOTOFFLOAD_PTRS17:%.*]] = alloca [9 x i8*], align 8
 // CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS18:%.*]] = alloca [9 x i8*], align 8
-// CHECK1-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [9 x i64], align 8
 // CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1:[0-9]+]])
 // CHECK1-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
 // CHECK1-NEXT: store i32 0, i32* [[A]], align 4
@@ -423,96 +422,79 @@
 // CHECK1-NEXT: [[TMP51:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 0
 // CHECK1-NEXT: [[TMP52:%.*]] = bitcast i8** [[TMP51]] to i64*
 // CHECK1-NEXT: store i64 [[TMP44]], i64* [[TMP52]], align 8
-// CHECK1-NEXT: [[TMP53:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
-// CHECK1-NEXT: store i64 4, i64* [[TMP53]], align 8
-// CHECK1-NEXT: [[TMP54:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 0
-// CHECK1-NEXT: store i8* null, i8** [[TMP54]], align 8
-// CHECK1-NEXT: [[TMP55:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 1
-// CHECK1-NEXT: [[TMP56:%.*]] = bitcast i8** [[TMP55]] to [10 x float]**
-// CHECK1-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP56]], align 8
-// CHECK1-NEXT: [[TMP57:%.*]] = getelementptr inbounds [9 x
i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 1 -// CHECK1-NEXT: [[TMP58:%.*]] = bitcast i8** [[TMP57]] to [10 x float]** -// CHECK1-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP58]], align 8 -// CHECK1-NEXT: [[TMP59:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK1-NEXT: store i64 40, i64* [[TMP59]], align 8 -// CHECK1-NEXT: [[TMP60:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 1 -// CHECK1-NEXT: store i8* null, i8** [[TMP60]], align 8 -// CHECK1-NEXT: [[TMP61:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 2 +// CHECK1-NEXT: [[TMP53:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 0 +// CHECK1-NEXT: store i8* null, i8** [[TMP53]], align 8 +// CHECK1-NEXT: [[TMP54:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 1 +// CHECK1-NEXT: [[TMP55:%.*]] = bitcast i8** [[TMP54]] to [10 x float]** +// CHECK1-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP55]], align 8 +// CHECK1-NEXT: [[TMP56:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 1 +// CHECK1-NEXT: [[TMP57:%.*]] = bitcast i8** [[TMP56]] to [10 x float]** +// CHECK1-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP57]], align 8 +// CHECK1-NEXT: [[TMP58:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 1 +// CHECK1-NEXT: store i8* null, i8** [[TMP58]], align 8 +// CHECK1-NEXT: [[TMP59:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 2 +// CHECK1-NEXT: [[TMP60:%.*]] = bitcast i8** [[TMP59]] to i64* +// CHECK1-NEXT: store i64 [[TMP2]], i64* [[TMP60]], align 8 +// CHECK1-NEXT: [[TMP61:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 2 // CHECK1-NEXT: [[TMP62:%.*]] = bitcast i8** [[TMP61]] to i64* // CHECK1-NEXT: store i64 [[TMP2]], i64* [[TMP62]], align 8 -// CHECK1-NEXT: [[TMP63:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 2 -// CHECK1-NEXT: [[TMP64:%.*]] = bitcast i8** [[TMP63]] to i64* -// CHECK1-NEXT: store i64 [[TMP2]], i64* [[TMP64]], align 8 -// CHECK1-NEXT: [[TMP65:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK1-NEXT: store i64 8, i64* [[TMP65]], align 8 -// CHECK1-NEXT: [[TMP66:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 2 -// CHECK1-NEXT: store i8* null, i8** [[TMP66]], align 8 -// CHECK1-NEXT: [[TMP67:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 3 -// CHECK1-NEXT: [[TMP68:%.*]] = bitcast i8** [[TMP67]] to float** -// CHECK1-NEXT: store float* [[VLA]], float** [[TMP68]], align 8 -// CHECK1-NEXT: [[TMP69:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 3 -// CHECK1-NEXT: [[TMP70:%.*]] = bitcast i8** [[TMP69]] to float** -// CHECK1-NEXT: store float* [[VLA]], float** [[TMP70]], align 8 -// CHECK1-NEXT: [[TMP71:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK1-NEXT: store i64 [[TMP46]], i64* [[TMP71]], align 8 -// CHECK1-NEXT: [[TMP72:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 3 -// CHECK1-NEXT: store i8* null, i8** [[TMP72]], align 8 -// CHECK1-NEXT: [[TMP73:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* 
[[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 4 -// CHECK1-NEXT: [[TMP74:%.*]] = bitcast i8** [[TMP73]] to [5 x [10 x double]]** -// CHECK1-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP74]], align 8 -// CHECK1-NEXT: [[TMP75:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 4 -// CHECK1-NEXT: [[TMP76:%.*]] = bitcast i8** [[TMP75]] to [5 x [10 x double]]** -// CHECK1-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP76]], align 8 -// CHECK1-NEXT: [[TMP77:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK1-NEXT: store i64 400, i64* [[TMP77]], align 8 -// CHECK1-NEXT: [[TMP78:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 4 +// CHECK1-NEXT: [[TMP63:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 2 +// CHECK1-NEXT: store i8* null, i8** [[TMP63]], align 8 +// CHECK1-NEXT: [[TMP64:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 3 +// CHECK1-NEXT: [[TMP65:%.*]] = bitcast i8** [[TMP64]] to float** +// CHECK1-NEXT: store float* [[VLA]], float** [[TMP65]], align 8 +// CHECK1-NEXT: [[TMP66:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 3 +// CHECK1-NEXT: [[TMP67:%.*]] = bitcast i8** [[TMP66]] to float** +// CHECK1-NEXT: store float* [[VLA]], float** [[TMP67]], align 8 +// CHECK1-NEXT: store i64 [[TMP46]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_sizes.8, i32 0, i32 3), align 8 +// CHECK1-NEXT: [[TMP68:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 3 +// CHECK1-NEXT: store i8* null, i8** [[TMP68]], align 8 +// CHECK1-NEXT: [[TMP69:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 4 +// CHECK1-NEXT: [[TMP70:%.*]] = bitcast i8** [[TMP69]] to [5 x [10 x double]]** +// CHECK1-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP70]], align 8 +// CHECK1-NEXT: [[TMP71:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 4 +// CHECK1-NEXT: [[TMP72:%.*]] = bitcast i8** [[TMP71]] to [5 x [10 x double]]** +// CHECK1-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP72]], align 8 +// CHECK1-NEXT: [[TMP73:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 4 +// CHECK1-NEXT: store i8* null, i8** [[TMP73]], align 8 +// CHECK1-NEXT: [[TMP74:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 5 +// CHECK1-NEXT: [[TMP75:%.*]] = bitcast i8** [[TMP74]] to i64* +// CHECK1-NEXT: store i64 5, i64* [[TMP75]], align 8 +// CHECK1-NEXT: [[TMP76:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 5 +// CHECK1-NEXT: [[TMP77:%.*]] = bitcast i8** [[TMP76]] to i64* +// CHECK1-NEXT: store i64 5, i64* [[TMP77]], align 8 +// CHECK1-NEXT: [[TMP78:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 5 // CHECK1-NEXT: store i8* null, i8** [[TMP78]], align 8 -// CHECK1-NEXT: [[TMP79:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 5 +// CHECK1-NEXT: [[TMP79:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 6 // CHECK1-NEXT: [[TMP80:%.*]] = bitcast i8** [[TMP79]] to i64* -// CHECK1-NEXT: store i64 5, i64* [[TMP80]], align 8 -// CHECK1-NEXT: [[TMP81:%.*]] = 
getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 5 +// CHECK1-NEXT: store i64 [[TMP5]], i64* [[TMP80]], align 8 +// CHECK1-NEXT: [[TMP81:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 6 // CHECK1-NEXT: [[TMP82:%.*]] = bitcast i8** [[TMP81]] to i64* -// CHECK1-NEXT: store i64 5, i64* [[TMP82]], align 8 -// CHECK1-NEXT: [[TMP83:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5 -// CHECK1-NEXT: store i64 8, i64* [[TMP83]], align 8 -// CHECK1-NEXT: [[TMP84:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 5 -// CHECK1-NEXT: store i8* null, i8** [[TMP84]], align 8 -// CHECK1-NEXT: [[TMP85:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 6 -// CHECK1-NEXT: [[TMP86:%.*]] = bitcast i8** [[TMP85]] to i64* -// CHECK1-NEXT: store i64 [[TMP5]], i64* [[TMP86]], align 8 -// CHECK1-NEXT: [[TMP87:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 6 -// CHECK1-NEXT: [[TMP88:%.*]] = bitcast i8** [[TMP87]] to i64* -// CHECK1-NEXT: store i64 [[TMP5]], i64* [[TMP88]], align 8 -// CHECK1-NEXT: [[TMP89:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6 -// CHECK1-NEXT: store i64 8, i64* [[TMP89]], align 8 -// CHECK1-NEXT: [[TMP90:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 6 -// CHECK1-NEXT: store i8* null, i8** [[TMP90]], align 8 -// CHECK1-NEXT: [[TMP91:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 7 -// CHECK1-NEXT: [[TMP92:%.*]] = bitcast i8** [[TMP91]] to double** -// CHECK1-NEXT: store double* [[VLA1]], double** [[TMP92]], align 8 -// CHECK1-NEXT: [[TMP93:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 7 -// CHECK1-NEXT: [[TMP94:%.*]] = bitcast i8** [[TMP93]] to double** -// CHECK1-NEXT: store double* [[VLA1]], double** [[TMP94]], align 8 -// CHECK1-NEXT: [[TMP95:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7 -// CHECK1-NEXT: store i64 [[TMP48]], i64* [[TMP95]], align 8 -// CHECK1-NEXT: [[TMP96:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 7 -// CHECK1-NEXT: store i8* null, i8** [[TMP96]], align 8 -// CHECK1-NEXT: [[TMP97:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 8 -// CHECK1-NEXT: [[TMP98:%.*]] = bitcast i8** [[TMP97]] to %struct.TT** -// CHECK1-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP98]], align 8 -// CHECK1-NEXT: [[TMP99:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 8 -// CHECK1-NEXT: [[TMP100:%.*]] = bitcast i8** [[TMP99]] to %struct.TT** -// CHECK1-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP100]], align 8 -// CHECK1-NEXT: [[TMP101:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 8 -// CHECK1-NEXT: store i64 16, i64* [[TMP101]], align 8 -// CHECK1-NEXT: [[TMP102:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 8 -// CHECK1-NEXT: store i8* null, i8** [[TMP102]], align 8 -// CHECK1-NEXT: [[TMP103:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP104:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP105:%.*]] = getelementptr inbounds 
[9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP106:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l144.region_id, i32 9, i8** [[TMP103]], i8** [[TMP104]], i64* [[TMP105]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_maptypes.8, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) -// CHECK1-NEXT: [[TMP107:%.*]] = icmp ne i32 [[TMP106]], 0 -// CHECK1-NEXT: br i1 [[TMP107]], label [[OMP_OFFLOAD_FAILED19:%.*]], label [[OMP_OFFLOAD_CONT20:%.*]] +// CHECK1-NEXT: store i64 [[TMP5]], i64* [[TMP82]], align 8 +// CHECK1-NEXT: [[TMP83:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 6 +// CHECK1-NEXT: store i8* null, i8** [[TMP83]], align 8 +// CHECK1-NEXT: [[TMP84:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 7 +// CHECK1-NEXT: [[TMP85:%.*]] = bitcast i8** [[TMP84]] to double** +// CHECK1-NEXT: store double* [[VLA1]], double** [[TMP85]], align 8 +// CHECK1-NEXT: [[TMP86:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 7 +// CHECK1-NEXT: [[TMP87:%.*]] = bitcast i8** [[TMP86]] to double** +// CHECK1-NEXT: store double* [[VLA1]], double** [[TMP87]], align 8 +// CHECK1-NEXT: store i64 [[TMP48]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_sizes.8, i32 0, i32 7), align 8 +// CHECK1-NEXT: [[TMP88:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 7 +// CHECK1-NEXT: store i8* null, i8** [[TMP88]], align 8 +// CHECK1-NEXT: [[TMP89:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 8 +// CHECK1-NEXT: [[TMP90:%.*]] = bitcast i8** [[TMP89]] to %struct.TT** +// CHECK1-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP90]], align 8 +// CHECK1-NEXT: [[TMP91:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 8 +// CHECK1-NEXT: [[TMP92:%.*]] = bitcast i8** [[TMP91]] to %struct.TT** +// CHECK1-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP92]], align 8 +// CHECK1-NEXT: [[TMP93:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 8 +// CHECK1-NEXT: store i8* null, i8** [[TMP93]], align 8 +// CHECK1-NEXT: [[TMP94:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP95:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP96:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l144.region_id, i32 9, i8** [[TMP94]], i8** [[TMP95]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) +// CHECK1-NEXT: [[TMP97:%.*]] = icmp ne i32 [[TMP96]], 0 +// CHECK1-NEXT: br i1 [[TMP97]], label [[OMP_OFFLOAD_FAILED19:%.*]], label [[OMP_OFFLOAD_CONT20:%.*]] // CHECK1: omp_offload.failed19: // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l144(i64 [[TMP44]], [10 x float]* [[B]], i64 [[TMP2]], float* [[VLA]], [5 x [10 x double]]* [[C]], i64 5, i64 [[TMP5]], double* [[VLA1]], %struct.TT* [[D]]) #[[ATTR4]] // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT20]] @@ -522,10 +504,10 @@ // CHECK1-NEXT: call void 
@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l144(i64 [[TMP44]], [10 x float]* [[B]], i64 [[TMP2]], float* [[VLA]], [5 x [10 x double]]* [[C]], i64 5, i64 [[TMP5]], double* [[VLA1]], %struct.TT* [[D]]) #[[ATTR4]] // CHECK1-NEXT: br label [[OMP_IF_END22]] // CHECK1: omp_if.end22: -// CHECK1-NEXT: [[TMP108:%.*]] = load i32, i32* [[A]], align 4 -// CHECK1-NEXT: [[TMP109:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK1-NEXT: call void @llvm.stackrestore(i8* [[TMP109]]) -// CHECK1-NEXT: ret i32 [[TMP108]] +// CHECK1-NEXT: [[TMP98:%.*]] = load i32, i32* [[A]], align 4 +// CHECK1-NEXT: [[TMP99:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK1-NEXT: call void @llvm.stackrestore(i8* [[TMP99]]) +// CHECK1-NEXT: ret i32 [[TMP98]] // // // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l100 @@ -863,7 +845,6 @@ // CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 8 // CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 8 // CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 8 -// CHECK1-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 8 // CHECK1-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 // CHECK1-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8 @@ -894,56 +875,46 @@ // CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK1-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to double** // CHECK1-NEXT: store double* [[A]], double** [[TMP13]], align 8 -// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK1-NEXT: store i64 8, i64* [[TMP14]], align 8 -// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK1-NEXT: store i8* null, i8** [[TMP15]], align 8 -// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK1-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i64* -// CHECK1-NEXT: store i64 [[TMP6]], i64* [[TMP17]], align 8 -// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK1-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i64* -// CHECK1-NEXT: store i64 [[TMP6]], i64* [[TMP19]], align 8 -// CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK1-NEXT: store i64 4, i64* [[TMP20]], align 8 -// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK1-NEXT: store i8* null, i8** [[TMP21]], align 8 -// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK1-NEXT: store i8* null, i8** [[TMP14]], align 8 +// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK1-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i64* +// CHECK1-NEXT: store i64 [[TMP6]], i64* [[TMP16]], align 8 +// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK1-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i64* +// CHECK1-NEXT: store i64 [[TMP6]], i64* [[TMP18]], 
align 8 +// CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK1-NEXT: store i8* null, i8** [[TMP19]], align 8 +// CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK1-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i64* +// CHECK1-NEXT: store i64 2, i64* [[TMP21]], align 8 +// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK1-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i64* // CHECK1-NEXT: store i64 2, i64* [[TMP23]], align 8 -// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK1-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i64* -// CHECK1-NEXT: store i64 2, i64* [[TMP25]], align 8 -// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK1-NEXT: store i64 8, i64* [[TMP26]], align 8 -// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK1-NEXT: store i8* null, i8** [[TMP27]], align 8 -// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK1-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i64* -// CHECK1-NEXT: store i64 [[TMP2]], i64* [[TMP29]], align 8 -// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK1-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i64* -// CHECK1-NEXT: store i64 [[TMP2]], i64* [[TMP31]], align 8 -// CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK1-NEXT: store i64 8, i64* [[TMP32]], align 8 -// CHECK1-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 -// CHECK1-NEXT: store i8* null, i8** [[TMP33]], align 8 -// CHECK1-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK1-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i16** -// CHECK1-NEXT: store i16* [[VLA]], i16** [[TMP35]], align 8 -// CHECK1-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK1-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i16** -// CHECK1-NEXT: store i16* [[VLA]], i16** [[TMP37]], align 8 -// CHECK1-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK1-NEXT: store i64 [[TMP9]], i64* [[TMP38]], align 8 -// CHECK1-NEXT: [[TMP39:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 -// CHECK1-NEXT: store i8* null, i8** [[TMP39]], align 8 -// CHECK1-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216.region_id, i32 5, i8** [[TMP40]], i8** [[TMP41]], i64* [[TMP42]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 1, i32 
0) -// CHECK1-NEXT: [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0 -// CHECK1-NEXT: br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK1-NEXT: store i8* null, i8** [[TMP24]], align 8 +// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK1-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i64* +// CHECK1-NEXT: store i64 [[TMP2]], i64* [[TMP26]], align 8 +// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK1-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i64* +// CHECK1-NEXT: store i64 [[TMP2]], i64* [[TMP28]], align 8 +// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 +// CHECK1-NEXT: store i8* null, i8** [[TMP29]], align 8 +// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK1-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i16** +// CHECK1-NEXT: store i16* [[VLA]], i16** [[TMP31]], align 8 +// CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK1-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i16** +// CHECK1-NEXT: store i16* [[VLA]], i16** [[TMP33]], align 8 +// CHECK1-NEXT: store i64 [[TMP9]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.11, i32 0, i32 4), align 8 +// CHECK1-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 +// CHECK1-NEXT: store i8* null, i8** [[TMP34]], align 8 +// CHECK1-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP37:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216.region_id, i32 5, i8** [[TMP35]], i8** [[TMP36]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.11, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) +// CHECK1-NEXT: [[TMP38:%.*]] = icmp ne i32 [[TMP37]], 0 +// CHECK1-NEXT: br i1 [[TMP38]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK1: omp_offload.failed: // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216(%struct.S1* [[THIS1]], i64 [[TMP6]], i64 2, i64 [[TMP2]], i16* [[VLA]]) #[[ATTR4]] // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -953,15 +924,15 @@ // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216(%struct.S1* [[THIS1]], i64 [[TMP6]], i64 2, i64 [[TMP2]], i16* [[VLA]]) #[[ATTR4]] // CHECK1-NEXT: br label [[OMP_IF_END]] // CHECK1: omp_if.end: -// CHECK1-NEXT: [[TMP45:%.*]] = mul nsw i64 1, [[TMP2]] -// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP45]] +// CHECK1-NEXT: [[TMP39:%.*]] = mul nsw i64 1, [[TMP2]] +// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP39]] // CHECK1-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1 -// CHECK1-NEXT: [[TMP46:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2 -// 
CHECK1-NEXT: [[CONV3:%.*]] = sext i16 [[TMP46]] to i32 -// CHECK1-NEXT: [[TMP47:%.*]] = load i32, i32* [[B]], align 4 -// CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], [[TMP47]] -// CHECK1-NEXT: [[TMP48:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK1-NEXT: call void @llvm.stackrestore(i8* [[TMP48]]) +// CHECK1-NEXT: [[TMP40:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2 +// CHECK1-NEXT: [[CONV3:%.*]] = sext i16 [[TMP40]] to i32 +// CHECK1-NEXT: [[TMP41:%.*]] = load i32, i32* [[B]], align 4 +// CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], [[TMP41]] +// CHECK1-NEXT: [[TMP42:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK1-NEXT: call void @llvm.stackrestore(i8* [[TMP42]]) // CHECK1-NEXT: ret i32 [[ADD4]] // // @@ -1033,7 +1004,7 @@ // CHECK1-NEXT: store i8* null, i8** [[TMP26]], align 8 // CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l198.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) +// CHECK1-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l198.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.14, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.15, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) // CHECK1-NEXT: [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0 // CHECK1-NEXT: br i1 [[TMP30]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK1: omp_offload.failed: @@ -1102,7 +1073,7 @@ // CHECK1-NEXT: store i8* null, i8** [[TMP19]], align 8 // CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l181.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.15, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) +// CHECK1-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l181.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.17, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.18, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) // CHECK1-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0 // CHECK1-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK1: omp_offload.failed: @@ -1141,11 +1112,11 @@ // CHECK1-NEXT: [[CONV3:%.*]] = bitcast i64* [[B_CASTED]] to 
i32* // CHECK1-NEXT: store i32 [[TMP4]], i32* [[CONV3]], align 4 // CHECK1-NEXT: [[TMP5:%.*]] = load i64, i64* [[B_CASTED]], align 8 -// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], i16* [[TMP3]]) +// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], i16* [[TMP3]]) // CHECK1-NEXT: ret void // // -// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..9 +// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..10 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i64 noundef [[B:%.*]], i64 noundef [[VLA:%.*]], i64 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR3]] { // CHECK1-NEXT: entry: // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -1214,11 +1185,11 @@ // CHECK1-NEXT: [[CONV5:%.*]] = bitcast i64* [[AAA_CASTED]] to i8* // CHECK1-NEXT: store i8 [[TMP5]], i8* [[CONV5]], align 1 // CHECK1-NEXT: [[TMP6:%.*]] = load i64, i64* [[AAA_CASTED]], align 8 -// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, [10 x i32]*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], [10 x i32]* [[TMP0]]) +// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, [10 x i32]*)* @.omp_outlined..13 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], [10 x i32]* [[TMP0]]) // CHECK1-NEXT: ret void // // -// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..13 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], i64 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] { // CHECK1-NEXT: entry: // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -1279,11 +1250,11 @@ // CHECK1-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16* // CHECK1-NEXT: store i16 [[TMP3]], i16* [[CONV3]], align 2 // CHECK1-NEXT: [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8 -// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]]) +// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]]) // CHECK1-NEXT: ret void // // -// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..14 +// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..16 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] { // CHECK1-NEXT: entry: // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -1348,7 +1319,6 @@ // CHECK2-NEXT: [[DOTOFFLOAD_BASEPTRS16:%.*]] = alloca [9 x i8*], align 8 // CHECK2-NEXT: [[DOTOFFLOAD_PTRS17:%.*]] = alloca [9 x i8*], align 8 // CHECK2-NEXT: [[DOTOFFLOAD_MAPPERS18:%.*]] = alloca [9 x i8*], align 8 -// CHECK2-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [9 x i64], align 8 // CHECK2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1:[0-9]+]]) // CHECK2-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 // CHECK2-NEXT: store i32 0, i32* [[A]], align 4 @@ -1453,96 +1423,79 @@ // CHECK2-NEXT: [[TMP51:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 0 // CHECK2-NEXT: [[TMP52:%.*]] = bitcast i8** [[TMP51]] to i64* // CHECK2-NEXT: store i64 [[TMP44]], i64* [[TMP52]], align 8 -// CHECK2-NEXT: [[TMP53:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK2-NEXT: store i64 4, i64* [[TMP53]], align 8 -// CHECK2-NEXT: [[TMP54:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 0 -// CHECK2-NEXT: store i8* null, i8** [[TMP54]], align 8 -// CHECK2-NEXT: [[TMP55:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 1 -// CHECK2-NEXT: [[TMP56:%.*]] = bitcast i8** [[TMP55]] to [10 x float]** -// CHECK2-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP56]], align 8 -// CHECK2-NEXT: [[TMP57:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 1 -// CHECK2-NEXT: [[TMP58:%.*]] = bitcast i8** [[TMP57]] to [10 x float]** -// CHECK2-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP58]], align 8 -// CHECK2-NEXT: [[TMP59:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK2-NEXT: store i64 40, i64* [[TMP59]], align 8 -// CHECK2-NEXT: [[TMP60:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 1 -// CHECK2-NEXT: store i8* null, i8** [[TMP60]], align 8 -// CHECK2-NEXT: [[TMP61:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 2 +// CHECK2-NEXT: [[TMP53:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 0 +// CHECK2-NEXT: store i8* null, i8** [[TMP53]], align 8 +// CHECK2-NEXT: [[TMP54:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 1 +// CHECK2-NEXT: [[TMP55:%.*]] = bitcast i8** [[TMP54]] to [10 x float]** +// CHECK2-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP55]], align 8 +// CHECK2-NEXT: [[TMP56:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 1 +// CHECK2-NEXT: [[TMP57:%.*]] = bitcast i8** [[TMP56]] to [10 x float]** +// CHECK2-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP57]], align 8 +// CHECK2-NEXT: [[TMP58:%.*]] = 
getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 1 +// CHECK2-NEXT: store i8* null, i8** [[TMP58]], align 8 +// CHECK2-NEXT: [[TMP59:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 2 +// CHECK2-NEXT: [[TMP60:%.*]] = bitcast i8** [[TMP59]] to i64* +// CHECK2-NEXT: store i64 [[TMP2]], i64* [[TMP60]], align 8 +// CHECK2-NEXT: [[TMP61:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 2 // CHECK2-NEXT: [[TMP62:%.*]] = bitcast i8** [[TMP61]] to i64* // CHECK2-NEXT: store i64 [[TMP2]], i64* [[TMP62]], align 8 -// CHECK2-NEXT: [[TMP63:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 2 -// CHECK2-NEXT: [[TMP64:%.*]] = bitcast i8** [[TMP63]] to i64* -// CHECK2-NEXT: store i64 [[TMP2]], i64* [[TMP64]], align 8 -// CHECK2-NEXT: [[TMP65:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK2-NEXT: store i64 8, i64* [[TMP65]], align 8 -// CHECK2-NEXT: [[TMP66:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 2 -// CHECK2-NEXT: store i8* null, i8** [[TMP66]], align 8 -// CHECK2-NEXT: [[TMP67:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 3 -// CHECK2-NEXT: [[TMP68:%.*]] = bitcast i8** [[TMP67]] to float** -// CHECK2-NEXT: store float* [[VLA]], float** [[TMP68]], align 8 -// CHECK2-NEXT: [[TMP69:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 3 -// CHECK2-NEXT: [[TMP70:%.*]] = bitcast i8** [[TMP69]] to float** -// CHECK2-NEXT: store float* [[VLA]], float** [[TMP70]], align 8 -// CHECK2-NEXT: [[TMP71:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK2-NEXT: store i64 [[TMP46]], i64* [[TMP71]], align 8 -// CHECK2-NEXT: [[TMP72:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 3 -// CHECK2-NEXT: store i8* null, i8** [[TMP72]], align 8 -// CHECK2-NEXT: [[TMP73:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 4 -// CHECK2-NEXT: [[TMP74:%.*]] = bitcast i8** [[TMP73]] to [5 x [10 x double]]** -// CHECK2-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP74]], align 8 -// CHECK2-NEXT: [[TMP75:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 4 -// CHECK2-NEXT: [[TMP76:%.*]] = bitcast i8** [[TMP75]] to [5 x [10 x double]]** -// CHECK2-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP76]], align 8 -// CHECK2-NEXT: [[TMP77:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK2-NEXT: store i64 400, i64* [[TMP77]], align 8 -// CHECK2-NEXT: [[TMP78:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 4 +// CHECK2-NEXT: [[TMP63:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 2 +// CHECK2-NEXT: store i8* null, i8** [[TMP63]], align 8 +// CHECK2-NEXT: [[TMP64:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 3 +// CHECK2-NEXT: [[TMP65:%.*]] = bitcast i8** [[TMP64]] to float** +// CHECK2-NEXT: store float* [[VLA]], float** [[TMP65]], align 8 +// CHECK2-NEXT: [[TMP66:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 3 +// CHECK2-NEXT: [[TMP67:%.*]] = bitcast i8** [[TMP66]] to float** +// CHECK2-NEXT: store float* 
[[VLA]], float** [[TMP67]], align 8 +// CHECK2-NEXT: store i64 [[TMP46]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_sizes.8, i32 0, i32 3), align 8 +// CHECK2-NEXT: [[TMP68:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 3 +// CHECK2-NEXT: store i8* null, i8** [[TMP68]], align 8 +// CHECK2-NEXT: [[TMP69:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 4 +// CHECK2-NEXT: [[TMP70:%.*]] = bitcast i8** [[TMP69]] to [5 x [10 x double]]** +// CHECK2-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP70]], align 8 +// CHECK2-NEXT: [[TMP71:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 4 +// CHECK2-NEXT: [[TMP72:%.*]] = bitcast i8** [[TMP71]] to [5 x [10 x double]]** +// CHECK2-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP72]], align 8 +// CHECK2-NEXT: [[TMP73:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 4 +// CHECK2-NEXT: store i8* null, i8** [[TMP73]], align 8 +// CHECK2-NEXT: [[TMP74:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 5 +// CHECK2-NEXT: [[TMP75:%.*]] = bitcast i8** [[TMP74]] to i64* +// CHECK2-NEXT: store i64 5, i64* [[TMP75]], align 8 +// CHECK2-NEXT: [[TMP76:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 5 +// CHECK2-NEXT: [[TMP77:%.*]] = bitcast i8** [[TMP76]] to i64* +// CHECK2-NEXT: store i64 5, i64* [[TMP77]], align 8 +// CHECK2-NEXT: [[TMP78:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 5 // CHECK2-NEXT: store i8* null, i8** [[TMP78]], align 8 -// CHECK2-NEXT: [[TMP79:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 5 +// CHECK2-NEXT: [[TMP79:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 6 // CHECK2-NEXT: [[TMP80:%.*]] = bitcast i8** [[TMP79]] to i64* -// CHECK2-NEXT: store i64 5, i64* [[TMP80]], align 8 -// CHECK2-NEXT: [[TMP81:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 5 +// CHECK2-NEXT: store i64 [[TMP5]], i64* [[TMP80]], align 8 +// CHECK2-NEXT: [[TMP81:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 6 // CHECK2-NEXT: [[TMP82:%.*]] = bitcast i8** [[TMP81]] to i64* -// CHECK2-NEXT: store i64 5, i64* [[TMP82]], align 8 -// CHECK2-NEXT: [[TMP83:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5 -// CHECK2-NEXT: store i64 8, i64* [[TMP83]], align 8 -// CHECK2-NEXT: [[TMP84:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 5 -// CHECK2-NEXT: store i8* null, i8** [[TMP84]], align 8 -// CHECK2-NEXT: [[TMP85:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 6 -// CHECK2-NEXT: [[TMP86:%.*]] = bitcast i8** [[TMP85]] to i64* -// CHECK2-NEXT: store i64 [[TMP5]], i64* [[TMP86]], align 8 -// CHECK2-NEXT: [[TMP87:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 6 -// CHECK2-NEXT: [[TMP88:%.*]] = bitcast i8** [[TMP87]] to i64* -// CHECK2-NEXT: store i64 [[TMP5]], i64* [[TMP88]], align 8 -// CHECK2-NEXT: [[TMP89:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6 -// CHECK2-NEXT: store i64 8, i64* [[TMP89]], align 8 -// CHECK2-NEXT: [[TMP90:%.*]] = getelementptr inbounds [9 x i8*], [9 x 
i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 6 -// CHECK2-NEXT: store i8* null, i8** [[TMP90]], align 8 -// CHECK2-NEXT: [[TMP91:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 7 -// CHECK2-NEXT: [[TMP92:%.*]] = bitcast i8** [[TMP91]] to double** -// CHECK2-NEXT: store double* [[VLA1]], double** [[TMP92]], align 8 -// CHECK2-NEXT: [[TMP93:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 7 -// CHECK2-NEXT: [[TMP94:%.*]] = bitcast i8** [[TMP93]] to double** -// CHECK2-NEXT: store double* [[VLA1]], double** [[TMP94]], align 8 -// CHECK2-NEXT: [[TMP95:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7 -// CHECK2-NEXT: store i64 [[TMP48]], i64* [[TMP95]], align 8 -// CHECK2-NEXT: [[TMP96:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 7 -// CHECK2-NEXT: store i8* null, i8** [[TMP96]], align 8 -// CHECK2-NEXT: [[TMP97:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 8 -// CHECK2-NEXT: [[TMP98:%.*]] = bitcast i8** [[TMP97]] to %struct.TT** -// CHECK2-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP98]], align 8 -// CHECK2-NEXT: [[TMP99:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 8 -// CHECK2-NEXT: [[TMP100:%.*]] = bitcast i8** [[TMP99]] to %struct.TT** -// CHECK2-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP100]], align 8 -// CHECK2-NEXT: [[TMP101:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 8 -// CHECK2-NEXT: store i64 16, i64* [[TMP101]], align 8 -// CHECK2-NEXT: [[TMP102:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 8 -// CHECK2-NEXT: store i8* null, i8** [[TMP102]], align 8 -// CHECK2-NEXT: [[TMP103:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 0 -// CHECK2-NEXT: [[TMP104:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 0 -// CHECK2-NEXT: [[TMP105:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK2-NEXT: [[TMP106:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l144.region_id, i32 9, i8** [[TMP103]], i8** [[TMP104]], i64* [[TMP105]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_maptypes.8, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) -// CHECK2-NEXT: [[TMP107:%.*]] = icmp ne i32 [[TMP106]], 0 -// CHECK2-NEXT: br i1 [[TMP107]], label [[OMP_OFFLOAD_FAILED19:%.*]], label [[OMP_OFFLOAD_CONT20:%.*]] +// CHECK2-NEXT: store i64 [[TMP5]], i64* [[TMP82]], align 8 +// CHECK2-NEXT: [[TMP83:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 6 +// CHECK2-NEXT: store i8* null, i8** [[TMP83]], align 8 +// CHECK2-NEXT: [[TMP84:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 7 +// CHECK2-NEXT: [[TMP85:%.*]] = bitcast i8** [[TMP84]] to double** +// CHECK2-NEXT: store double* [[VLA1]], double** [[TMP85]], align 8 +// CHECK2-NEXT: [[TMP86:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 7 +// CHECK2-NEXT: [[TMP87:%.*]] = bitcast i8** [[TMP86]] to double** +// CHECK2-NEXT: store double* [[VLA1]], double** [[TMP87]], align 8 +// CHECK2-NEXT: store i64 [[TMP48]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_sizes.8, i32 
0, i32 7), align 8 +// CHECK2-NEXT: [[TMP88:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 7 +// CHECK2-NEXT: store i8* null, i8** [[TMP88]], align 8 +// CHECK2-NEXT: [[TMP89:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 8 +// CHECK2-NEXT: [[TMP90:%.*]] = bitcast i8** [[TMP89]] to %struct.TT** +// CHECK2-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP90]], align 8 +// CHECK2-NEXT: [[TMP91:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 8 +// CHECK2-NEXT: [[TMP92:%.*]] = bitcast i8** [[TMP91]] to %struct.TT** +// CHECK2-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP92]], align 8 +// CHECK2-NEXT: [[TMP93:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 8 +// CHECK2-NEXT: store i8* null, i8** [[TMP93]], align 8 +// CHECK2-NEXT: [[TMP94:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 0 +// CHECK2-NEXT: [[TMP95:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 0 +// CHECK2-NEXT: [[TMP96:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l144.region_id, i32 9, i8** [[TMP94]], i8** [[TMP95]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) +// CHECK2-NEXT: [[TMP97:%.*]] = icmp ne i32 [[TMP96]], 0 +// CHECK2-NEXT: br i1 [[TMP97]], label [[OMP_OFFLOAD_FAILED19:%.*]], label [[OMP_OFFLOAD_CONT20:%.*]] // CHECK2: omp_offload.failed19: // CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l144(i64 [[TMP44]], [10 x float]* [[B]], i64 [[TMP2]], float* [[VLA]], [5 x [10 x double]]* [[C]], i64 5, i64 [[TMP5]], double* [[VLA1]], %struct.TT* [[D]]) #[[ATTR4]] // CHECK2-NEXT: br label [[OMP_OFFLOAD_CONT20]] @@ -1552,10 +1505,10 @@ // CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l144(i64 [[TMP44]], [10 x float]* [[B]], i64 [[TMP2]], float* [[VLA]], [5 x [10 x double]]* [[C]], i64 5, i64 [[TMP5]], double* [[VLA1]], %struct.TT* [[D]]) #[[ATTR4]] // CHECK2-NEXT: br label [[OMP_IF_END22]] // CHECK2: omp_if.end22: -// CHECK2-NEXT: [[TMP108:%.*]] = load i32, i32* [[A]], align 4 -// CHECK2-NEXT: [[TMP109:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK2-NEXT: call void @llvm.stackrestore(i8* [[TMP109]]) -// CHECK2-NEXT: ret i32 [[TMP108]] +// CHECK2-NEXT: [[TMP98:%.*]] = load i32, i32* [[A]], align 4 +// CHECK2-NEXT: [[TMP99:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK2-NEXT: call void @llvm.stackrestore(i8* [[TMP99]]) +// CHECK2-NEXT: ret i32 [[TMP98]] // // // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l100 @@ -1893,7 +1846,6 @@ // CHECK2-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 8 // CHECK2-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 8 // CHECK2-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 8 -// CHECK2-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 8 // CHECK2-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 // CHECK2-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 // CHECK2-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8 @@ -1924,56 +1876,46 @@ // CHECK2-NEXT: [[TMP12:%.*]] = getelementptr inbounds [5 x i8*], [5 x 
i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK2-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to double** // CHECK2-NEXT: store double* [[A]], double** [[TMP13]], align 8 -// CHECK2-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK2-NEXT: store i64 8, i64* [[TMP14]], align 8 -// CHECK2-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK2-NEXT: store i8* null, i8** [[TMP15]], align 8 -// CHECK2-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK2-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i64* -// CHECK2-NEXT: store i64 [[TMP6]], i64* [[TMP17]], align 8 -// CHECK2-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK2-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i64* -// CHECK2-NEXT: store i64 [[TMP6]], i64* [[TMP19]], align 8 -// CHECK2-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK2-NEXT: store i64 4, i64* [[TMP20]], align 8 -// CHECK2-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK2-NEXT: store i8* null, i8** [[TMP21]], align 8 -// CHECK2-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK2-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK2-NEXT: store i8* null, i8** [[TMP14]], align 8 +// CHECK2-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK2-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i64* +// CHECK2-NEXT: store i64 [[TMP6]], i64* [[TMP16]], align 8 +// CHECK2-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK2-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i64* +// CHECK2-NEXT: store i64 [[TMP6]], i64* [[TMP18]], align 8 +// CHECK2-NEXT: [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK2-NEXT: store i8* null, i8** [[TMP19]], align 8 +// CHECK2-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK2-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i64* +// CHECK2-NEXT: store i64 2, i64* [[TMP21]], align 8 +// CHECK2-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK2-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i64* // CHECK2-NEXT: store i64 2, i64* [[TMP23]], align 8 -// CHECK2-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK2-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i64* -// CHECK2-NEXT: store i64 2, i64* [[TMP25]], align 8 -// CHECK2-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK2-NEXT: store i64 8, i64* [[TMP26]], align 8 -// CHECK2-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK2-NEXT: store i8* null, i8** [[TMP27]], align 8 -// CHECK2-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK2-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i64* -// CHECK2-NEXT: store i64 [[TMP2]], i64* [[TMP29]], align 8 -// CHECK2-NEXT: 
[[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK2-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i64* -// CHECK2-NEXT: store i64 [[TMP2]], i64* [[TMP31]], align 8 -// CHECK2-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK2-NEXT: store i64 8, i64* [[TMP32]], align 8 -// CHECK2-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 -// CHECK2-NEXT: store i8* null, i8** [[TMP33]], align 8 -// CHECK2-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK2-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i16** -// CHECK2-NEXT: store i16* [[VLA]], i16** [[TMP35]], align 8 -// CHECK2-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK2-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i16** -// CHECK2-NEXT: store i16* [[VLA]], i16** [[TMP37]], align 8 -// CHECK2-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK2-NEXT: store i64 [[TMP9]], i64* [[TMP38]], align 8 -// CHECK2-NEXT: [[TMP39:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 -// CHECK2-NEXT: store i8* null, i8** [[TMP39]], align 8 -// CHECK2-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK2-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK2-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK2-NEXT: [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216.region_id, i32 5, i8** [[TMP40]], i8** [[TMP41]], i64* [[TMP42]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) -// CHECK2-NEXT: [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0 -// CHECK2-NEXT: br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK2-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK2-NEXT: store i8* null, i8** [[TMP24]], align 8 +// CHECK2-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK2-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i64* +// CHECK2-NEXT: store i64 [[TMP2]], i64* [[TMP26]], align 8 +// CHECK2-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK2-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i64* +// CHECK2-NEXT: store i64 [[TMP2]], i64* [[TMP28]], align 8 +// CHECK2-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 +// CHECK2-NEXT: store i8* null, i8** [[TMP29]], align 8 +// CHECK2-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK2-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i16** +// CHECK2-NEXT: store i16* [[VLA]], i16** [[TMP31]], align 8 +// CHECK2-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK2-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i16** +// CHECK2-NEXT: store i16* [[VLA]], i16** [[TMP33]], align 8 +// 
CHECK2-NEXT: store i64 [[TMP9]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.11, i32 0, i32 4), align 8 +// CHECK2-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 +// CHECK2-NEXT: store i8* null, i8** [[TMP34]], align 8 +// CHECK2-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK2-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK2-NEXT: [[TMP37:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216.region_id, i32 5, i8** [[TMP35]], i8** [[TMP36]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.11, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) +// CHECK2-NEXT: [[TMP38:%.*]] = icmp ne i32 [[TMP37]], 0 +// CHECK2-NEXT: br i1 [[TMP38]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK2: omp_offload.failed: // CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216(%struct.S1* [[THIS1]], i64 [[TMP6]], i64 2, i64 [[TMP2]], i16* [[VLA]]) #[[ATTR4]] // CHECK2-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -1983,15 +1925,15 @@ // CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216(%struct.S1* [[THIS1]], i64 [[TMP6]], i64 2, i64 [[TMP2]], i16* [[VLA]]) #[[ATTR4]] // CHECK2-NEXT: br label [[OMP_IF_END]] // CHECK2: omp_if.end: -// CHECK2-NEXT: [[TMP45:%.*]] = mul nsw i64 1, [[TMP2]] -// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP45]] +// CHECK2-NEXT: [[TMP39:%.*]] = mul nsw i64 1, [[TMP2]] +// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP39]] // CHECK2-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1 -// CHECK2-NEXT: [[TMP46:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2 -// CHECK2-NEXT: [[CONV3:%.*]] = sext i16 [[TMP46]] to i32 -// CHECK2-NEXT: [[TMP47:%.*]] = load i32, i32* [[B]], align 4 -// CHECK2-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], [[TMP47]] -// CHECK2-NEXT: [[TMP48:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK2-NEXT: call void @llvm.stackrestore(i8* [[TMP48]]) +// CHECK2-NEXT: [[TMP40:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2 +// CHECK2-NEXT: [[CONV3:%.*]] = sext i16 [[TMP40]] to i32 +// CHECK2-NEXT: [[TMP41:%.*]] = load i32, i32* [[B]], align 4 +// CHECK2-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], [[TMP41]] +// CHECK2-NEXT: [[TMP42:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK2-NEXT: call void @llvm.stackrestore(i8* [[TMP42]]) // CHECK2-NEXT: ret i32 [[ADD4]] // // @@ -2063,7 +2005,7 @@ // CHECK2-NEXT: store i8* null, i8** [[TMP26]], align 8 // CHECK2-NEXT: [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK2-NEXT: [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK2-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l198.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 1, 
i32 0) +// CHECK2-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l198.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.14, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.15, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) // CHECK2-NEXT: [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0 // CHECK2-NEXT: br i1 [[TMP30]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK2: omp_offload.failed: @@ -2132,7 +2074,7 @@ // CHECK2-NEXT: store i8* null, i8** [[TMP19]], align 8 // CHECK2-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK2-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK2-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l181.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.15, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) +// CHECK2-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l181.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.17, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.18, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) // CHECK2-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0 // CHECK2-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK2: omp_offload.failed: @@ -2171,11 +2113,11 @@ // CHECK2-NEXT: [[CONV3:%.*]] = bitcast i64* [[B_CASTED]] to i32* // CHECK2-NEXT: store i32 [[TMP4]], i32* [[CONV3]], align 4 // CHECK2-NEXT: [[TMP5:%.*]] = load i64, i64* [[B_CASTED]], align 8 -// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], i16* [[TMP3]]) +// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], i16* [[TMP3]]) // CHECK2-NEXT: ret void // // -// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..9 +// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..10 // CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i64 noundef [[B:%.*]], i64 noundef [[VLA:%.*]], i64 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR3]] { // CHECK2-NEXT: entry: // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -2244,11 +2186,11 @@ // CHECK2-NEXT: [[CONV5:%.*]] = bitcast i64* [[AAA_CASTED]] to i8* // CHECK2-NEXT: store i8 [[TMP5]], i8* [[CONV5]], align 1 // CHECK2-NEXT: [[TMP6:%.*]] = load i64, i64* [[AAA_CASTED]], align 8 -// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, [10 x i32]*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], [10 x i32]* [[TMP0]]) +// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, [10 x i32]*)* @.omp_outlined..13 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], [10 x i32]* [[TMP0]]) // CHECK2-NEXT: ret void // // -// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..13 // CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], i64 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] { // CHECK2-NEXT: entry: // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -2309,11 +2251,11 @@ // CHECK2-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16* // CHECK2-NEXT: store i16 [[TMP3]], i16* [[CONV3]], align 2 // CHECK2-NEXT: [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8 -// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]]) +// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]]) // CHECK2-NEXT: ret void // // -// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..14 +// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..16 // CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] { // CHECK2-NEXT: entry: // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -2378,7 +2320,6 @@ // CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS13:%.*]] = alloca [9 x i8*], align 4 // CHECK3-NEXT: [[DOTOFFLOAD_PTRS14:%.*]] = alloca [9 x i8*], align 4 // CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS15:%.*]] = alloca [9 x i8*], align 4 -// CHECK3-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [9 x i64], align 4 // CHECK3-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1:[0-9]+]]) // CHECK3-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 // CHECK3-NEXT: store i32 0, i32* [[A]], align 4 @@ -2480,96 +2421,79 @@ // CHECK3-NEXT: [[TMP51:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 0 // CHECK3-NEXT: [[TMP52:%.*]] = bitcast i8** [[TMP51]] to i32* // CHECK3-NEXT: store i32 [[TMP42]], i32* [[TMP52]], align 4 -// CHECK3-NEXT: [[TMP53:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK3-NEXT: store i64 4, i64* [[TMP53]], align 4 -// CHECK3-NEXT: [[TMP54:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 0 -// CHECK3-NEXT: store i8* null, i8** [[TMP54]], align 4 -// CHECK3-NEXT: [[TMP55:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 1 -// CHECK3-NEXT: [[TMP56:%.*]] = bitcast i8** [[TMP55]] to [10 x float]** -// CHECK3-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP56]], align 4 -// CHECK3-NEXT: [[TMP57:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 1 -// CHECK3-NEXT: [[TMP58:%.*]] = bitcast i8** [[TMP57]] to [10 x float]** -// CHECK3-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP58]], align 4 -// CHECK3-NEXT: [[TMP59:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK3-NEXT: store i64 40, i64* [[TMP59]], align 4 -// CHECK3-NEXT: [[TMP60:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 1 -// CHECK3-NEXT: store i8* null, i8** [[TMP60]], align 4 -// CHECK3-NEXT: [[TMP61:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 2 +// CHECK3-NEXT: [[TMP53:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 0 +// CHECK3-NEXT: store i8* null, i8** [[TMP53]], align 4 +// CHECK3-NEXT: [[TMP54:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 1 +// CHECK3-NEXT: [[TMP55:%.*]] = bitcast i8** [[TMP54]] to [10 x float]** +// CHECK3-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP55]], align 4 +// CHECK3-NEXT: [[TMP56:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 1 +// CHECK3-NEXT: [[TMP57:%.*]] = bitcast i8** [[TMP56]] to [10 x float]** +// CHECK3-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP57]], align 4 +// CHECK3-NEXT: [[TMP58:%.*]] = 
getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 1 +// CHECK3-NEXT: store i8* null, i8** [[TMP58]], align 4 +// CHECK3-NEXT: [[TMP59:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 2 +// CHECK3-NEXT: [[TMP60:%.*]] = bitcast i8** [[TMP59]] to i32* +// CHECK3-NEXT: store i32 [[TMP1]], i32* [[TMP60]], align 4 +// CHECK3-NEXT: [[TMP61:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 2 // CHECK3-NEXT: [[TMP62:%.*]] = bitcast i8** [[TMP61]] to i32* // CHECK3-NEXT: store i32 [[TMP1]], i32* [[TMP62]], align 4 -// CHECK3-NEXT: [[TMP63:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 2 -// CHECK3-NEXT: [[TMP64:%.*]] = bitcast i8** [[TMP63]] to i32* -// CHECK3-NEXT: store i32 [[TMP1]], i32* [[TMP64]], align 4 -// CHECK3-NEXT: [[TMP65:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK3-NEXT: store i64 4, i64* [[TMP65]], align 4 -// CHECK3-NEXT: [[TMP66:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 2 -// CHECK3-NEXT: store i8* null, i8** [[TMP66]], align 4 -// CHECK3-NEXT: [[TMP67:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 3 -// CHECK3-NEXT: [[TMP68:%.*]] = bitcast i8** [[TMP67]] to float** -// CHECK3-NEXT: store float* [[VLA]], float** [[TMP68]], align 4 -// CHECK3-NEXT: [[TMP69:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 3 -// CHECK3-NEXT: [[TMP70:%.*]] = bitcast i8** [[TMP69]] to float** -// CHECK3-NEXT: store float* [[VLA]], float** [[TMP70]], align 4 -// CHECK3-NEXT: [[TMP71:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK3-NEXT: store i64 [[TMP45]], i64* [[TMP71]], align 4 -// CHECK3-NEXT: [[TMP72:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 3 -// CHECK3-NEXT: store i8* null, i8** [[TMP72]], align 4 -// CHECK3-NEXT: [[TMP73:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 4 -// CHECK3-NEXT: [[TMP74:%.*]] = bitcast i8** [[TMP73]] to [5 x [10 x double]]** -// CHECK3-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP74]], align 4 -// CHECK3-NEXT: [[TMP75:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 4 -// CHECK3-NEXT: [[TMP76:%.*]] = bitcast i8** [[TMP75]] to [5 x [10 x double]]** -// CHECK3-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP76]], align 4 -// CHECK3-NEXT: [[TMP77:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK3-NEXT: store i64 400, i64* [[TMP77]], align 4 -// CHECK3-NEXT: [[TMP78:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 4 +// CHECK3-NEXT: [[TMP63:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 2 +// CHECK3-NEXT: store i8* null, i8** [[TMP63]], align 4 +// CHECK3-NEXT: [[TMP64:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 3 +// CHECK3-NEXT: [[TMP65:%.*]] = bitcast i8** [[TMP64]] to float** +// CHECK3-NEXT: store float* [[VLA]], float** [[TMP65]], align 4 +// CHECK3-NEXT: [[TMP66:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 3 +// CHECK3-NEXT: [[TMP67:%.*]] = bitcast i8** [[TMP66]] to float** +// CHECK3-NEXT: store float* 
[[VLA]], float** [[TMP67]], align 4 +// CHECK3-NEXT: store i64 [[TMP45]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_sizes.8, i32 0, i32 3), align 4 +// CHECK3-NEXT: [[TMP68:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 3 +// CHECK3-NEXT: store i8* null, i8** [[TMP68]], align 4 +// CHECK3-NEXT: [[TMP69:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 4 +// CHECK3-NEXT: [[TMP70:%.*]] = bitcast i8** [[TMP69]] to [5 x [10 x double]]** +// CHECK3-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP70]], align 4 +// CHECK3-NEXT: [[TMP71:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 4 +// CHECK3-NEXT: [[TMP72:%.*]] = bitcast i8** [[TMP71]] to [5 x [10 x double]]** +// CHECK3-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP72]], align 4 +// CHECK3-NEXT: [[TMP73:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 4 +// CHECK3-NEXT: store i8* null, i8** [[TMP73]], align 4 +// CHECK3-NEXT: [[TMP74:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 5 +// CHECK3-NEXT: [[TMP75:%.*]] = bitcast i8** [[TMP74]] to i32* +// CHECK3-NEXT: store i32 5, i32* [[TMP75]], align 4 +// CHECK3-NEXT: [[TMP76:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 5 +// CHECK3-NEXT: [[TMP77:%.*]] = bitcast i8** [[TMP76]] to i32* +// CHECK3-NEXT: store i32 5, i32* [[TMP77]], align 4 +// CHECK3-NEXT: [[TMP78:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 5 // CHECK3-NEXT: store i8* null, i8** [[TMP78]], align 4 -// CHECK3-NEXT: [[TMP79:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 5 +// CHECK3-NEXT: [[TMP79:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 6 // CHECK3-NEXT: [[TMP80:%.*]] = bitcast i8** [[TMP79]] to i32* -// CHECK3-NEXT: store i32 5, i32* [[TMP80]], align 4 -// CHECK3-NEXT: [[TMP81:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 5 +// CHECK3-NEXT: store i32 [[TMP3]], i32* [[TMP80]], align 4 +// CHECK3-NEXT: [[TMP81:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 6 // CHECK3-NEXT: [[TMP82:%.*]] = bitcast i8** [[TMP81]] to i32* -// CHECK3-NEXT: store i32 5, i32* [[TMP82]], align 4 -// CHECK3-NEXT: [[TMP83:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5 -// CHECK3-NEXT: store i64 4, i64* [[TMP83]], align 4 -// CHECK3-NEXT: [[TMP84:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 5 -// CHECK3-NEXT: store i8* null, i8** [[TMP84]], align 4 -// CHECK3-NEXT: [[TMP85:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 6 -// CHECK3-NEXT: [[TMP86:%.*]] = bitcast i8** [[TMP85]] to i32* -// CHECK3-NEXT: store i32 [[TMP3]], i32* [[TMP86]], align 4 -// CHECK3-NEXT: [[TMP87:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 6 -// CHECK3-NEXT: [[TMP88:%.*]] = bitcast i8** [[TMP87]] to i32* -// CHECK3-NEXT: store i32 [[TMP3]], i32* [[TMP88]], align 4 -// CHECK3-NEXT: [[TMP89:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6 -// CHECK3-NEXT: store i64 4, i64* [[TMP89]], align 4 -// CHECK3-NEXT: [[TMP90:%.*]] = getelementptr inbounds [9 x i8*], [9 x 
i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 6 -// CHECK3-NEXT: store i8* null, i8** [[TMP90]], align 4 -// CHECK3-NEXT: [[TMP91:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 7 -// CHECK3-NEXT: [[TMP92:%.*]] = bitcast i8** [[TMP91]] to double** -// CHECK3-NEXT: store double* [[VLA1]], double** [[TMP92]], align 4 -// CHECK3-NEXT: [[TMP93:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 7 -// CHECK3-NEXT: [[TMP94:%.*]] = bitcast i8** [[TMP93]] to double** -// CHECK3-NEXT: store double* [[VLA1]], double** [[TMP94]], align 4 -// CHECK3-NEXT: [[TMP95:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7 -// CHECK3-NEXT: store i64 [[TMP48]], i64* [[TMP95]], align 4 -// CHECK3-NEXT: [[TMP96:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 7 -// CHECK3-NEXT: store i8* null, i8** [[TMP96]], align 4 -// CHECK3-NEXT: [[TMP97:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 8 -// CHECK3-NEXT: [[TMP98:%.*]] = bitcast i8** [[TMP97]] to %struct.TT** -// CHECK3-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP98]], align 4 -// CHECK3-NEXT: [[TMP99:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 8 -// CHECK3-NEXT: [[TMP100:%.*]] = bitcast i8** [[TMP99]] to %struct.TT** -// CHECK3-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP100]], align 4 -// CHECK3-NEXT: [[TMP101:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 8 -// CHECK3-NEXT: store i64 12, i64* [[TMP101]], align 4 -// CHECK3-NEXT: [[TMP102:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 8 -// CHECK3-NEXT: store i8* null, i8** [[TMP102]], align 4 -// CHECK3-NEXT: [[TMP103:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP104:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP105:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP106:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l144.region_id, i32 9, i8** [[TMP103]], i8** [[TMP104]], i64* [[TMP105]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_maptypes.8, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) -// CHECK3-NEXT: [[TMP107:%.*]] = icmp ne i32 [[TMP106]], 0 -// CHECK3-NEXT: br i1 [[TMP107]], label [[OMP_OFFLOAD_FAILED16:%.*]], label [[OMP_OFFLOAD_CONT17:%.*]] +// CHECK3-NEXT: store i32 [[TMP3]], i32* [[TMP82]], align 4 +// CHECK3-NEXT: [[TMP83:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 6 +// CHECK3-NEXT: store i8* null, i8** [[TMP83]], align 4 +// CHECK3-NEXT: [[TMP84:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 7 +// CHECK3-NEXT: [[TMP85:%.*]] = bitcast i8** [[TMP84]] to double** +// CHECK3-NEXT: store double* [[VLA1]], double** [[TMP85]], align 4 +// CHECK3-NEXT: [[TMP86:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 7 +// CHECK3-NEXT: [[TMP87:%.*]] = bitcast i8** [[TMP86]] to double** +// CHECK3-NEXT: store double* [[VLA1]], double** [[TMP87]], align 4 +// CHECK3-NEXT: store i64 [[TMP48]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_sizes.8, i32 
0, i32 7), align 4 +// CHECK3-NEXT: [[TMP88:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 7 +// CHECK3-NEXT: store i8* null, i8** [[TMP88]], align 4 +// CHECK3-NEXT: [[TMP89:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 8 +// CHECK3-NEXT: [[TMP90:%.*]] = bitcast i8** [[TMP89]] to %struct.TT** +// CHECK3-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP90]], align 4 +// CHECK3-NEXT: [[TMP91:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 8 +// CHECK3-NEXT: [[TMP92:%.*]] = bitcast i8** [[TMP91]] to %struct.TT** +// CHECK3-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP92]], align 4 +// CHECK3-NEXT: [[TMP93:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 8 +// CHECK3-NEXT: store i8* null, i8** [[TMP93]], align 4 +// CHECK3-NEXT: [[TMP94:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP95:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP96:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l144.region_id, i32 9, i8** [[TMP94]], i8** [[TMP95]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) +// CHECK3-NEXT: [[TMP97:%.*]] = icmp ne i32 [[TMP96]], 0 +// CHECK3-NEXT: br i1 [[TMP97]], label [[OMP_OFFLOAD_FAILED16:%.*]], label [[OMP_OFFLOAD_CONT17:%.*]] // CHECK3: omp_offload.failed16: // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l144(i32 [[TMP42]], [10 x float]* [[B]], i32 [[TMP1]], float* [[VLA]], [5 x [10 x double]]* [[C]], i32 5, i32 [[TMP3]], double* [[VLA1]], %struct.TT* [[D]]) #[[ATTR4]] // CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT17]] @@ -2579,10 +2503,10 @@ // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l144(i32 [[TMP42]], [10 x float]* [[B]], i32 [[TMP1]], float* [[VLA]], [5 x [10 x double]]* [[C]], i32 5, i32 [[TMP3]], double* [[VLA1]], %struct.TT* [[D]]) #[[ATTR4]] // CHECK3-NEXT: br label [[OMP_IF_END19]] // CHECK3: omp_if.end19: -// CHECK3-NEXT: [[TMP108:%.*]] = load i32, i32* [[A]], align 4 -// CHECK3-NEXT: [[TMP109:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK3-NEXT: call void @llvm.stackrestore(i8* [[TMP109]]) -// CHECK3-NEXT: ret i32 [[TMP108]] +// CHECK3-NEXT: [[TMP98:%.*]] = load i32, i32* [[A]], align 4 +// CHECK3-NEXT: [[TMP99:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK3-NEXT: call void @llvm.stackrestore(i8* [[TMP99]]) +// CHECK3-NEXT: ret i32 [[TMP98]] // // // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l100 @@ -2911,7 +2835,6 @@ // CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 4 // CHECK3-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 4 // CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 4 -// CHECK3-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 4 // CHECK3-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 // CHECK3-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 // CHECK3-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 @@ -2941,56 +2864,46 @@ // CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [5 x i8*], [5 x 
i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK3-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to double** // CHECK3-NEXT: store double* [[A]], double** [[TMP13]], align 4 -// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK3-NEXT: store i64 8, i64* [[TMP14]], align 4 -// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK3-NEXT: store i8* null, i8** [[TMP15]], align 4 -// CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK3-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32* -// CHECK3-NEXT: store i32 [[TMP5]], i32* [[TMP17]], align 4 -// CHECK3-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK3-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32* -// CHECK3-NEXT: store i32 [[TMP5]], i32* [[TMP19]], align 4 -// CHECK3-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK3-NEXT: store i64 4, i64* [[TMP20]], align 4 -// CHECK3-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK3-NEXT: store i8* null, i8** [[TMP21]], align 4 -// CHECK3-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK3-NEXT: store i8* null, i8** [[TMP14]], align 4 +// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK3-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i32* +// CHECK3-NEXT: store i32 [[TMP5]], i32* [[TMP16]], align 4 +// CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK3-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32* +// CHECK3-NEXT: store i32 [[TMP5]], i32* [[TMP18]], align 4 +// CHECK3-NEXT: [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK3-NEXT: store i8* null, i8** [[TMP19]], align 4 +// CHECK3-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK3-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32* +// CHECK3-NEXT: store i32 2, i32* [[TMP21]], align 4 +// CHECK3-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK3-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i32* // CHECK3-NEXT: store i32 2, i32* [[TMP23]], align 4 -// CHECK3-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK3-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i32* -// CHECK3-NEXT: store i32 2, i32* [[TMP25]], align 4 -// CHECK3-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK3-NEXT: store i64 4, i64* [[TMP26]], align 4 -// CHECK3-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK3-NEXT: store i8* null, i8** [[TMP27]], align 4 -// CHECK3-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK3-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i32* -// CHECK3-NEXT: store i32 [[TMP1]], i32* [[TMP29]], align 4 -// CHECK3-NEXT: 
[[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK3-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i32* -// CHECK3-NEXT: store i32 [[TMP1]], i32* [[TMP31]], align 4 -// CHECK3-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK3-NEXT: store i64 4, i64* [[TMP32]], align 4 -// CHECK3-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 -// CHECK3-NEXT: store i8* null, i8** [[TMP33]], align 4 -// CHECK3-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK3-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i16** -// CHECK3-NEXT: store i16* [[VLA]], i16** [[TMP35]], align 4 -// CHECK3-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK3-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i16** -// CHECK3-NEXT: store i16* [[VLA]], i16** [[TMP37]], align 4 -// CHECK3-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK3-NEXT: store i64 [[TMP9]], i64* [[TMP38]], align 4 -// CHECK3-NEXT: [[TMP39:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 -// CHECK3-NEXT: store i8* null, i8** [[TMP39]], align 4 -// CHECK3-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216.region_id, i32 5, i8** [[TMP40]], i8** [[TMP41]], i64* [[TMP42]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) -// CHECK3-NEXT: [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0 -// CHECK3-NEXT: br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK3-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK3-NEXT: store i8* null, i8** [[TMP24]], align 4 +// CHECK3-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK3-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i32* +// CHECK3-NEXT: store i32 [[TMP1]], i32* [[TMP26]], align 4 +// CHECK3-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK3-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i32* +// CHECK3-NEXT: store i32 [[TMP1]], i32* [[TMP28]], align 4 +// CHECK3-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 +// CHECK3-NEXT: store i8* null, i8** [[TMP29]], align 4 +// CHECK3-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK3-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i16** +// CHECK3-NEXT: store i16* [[VLA]], i16** [[TMP31]], align 4 +// CHECK3-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK3-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i16** +// CHECK3-NEXT: store i16* [[VLA]], i16** [[TMP33]], align 4 +// 
CHECK3-NEXT: store i64 [[TMP9]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.11, i32 0, i32 4), align 4 +// CHECK3-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 +// CHECK3-NEXT: store i8* null, i8** [[TMP34]], align 4 +// CHECK3-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP37:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216.region_id, i32 5, i8** [[TMP35]], i8** [[TMP36]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.11, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) +// CHECK3-NEXT: [[TMP38:%.*]] = icmp ne i32 [[TMP37]], 0 +// CHECK3-NEXT: br i1 [[TMP38]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK3: omp_offload.failed: // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216(%struct.S1* [[THIS1]], i32 [[TMP5]], i32 2, i32 [[TMP1]], i16* [[VLA]]) #[[ATTR4]] // CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -3000,15 +2913,15 @@ // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216(%struct.S1* [[THIS1]], i32 [[TMP5]], i32 2, i32 [[TMP1]], i16* [[VLA]]) #[[ATTR4]] // CHECK3-NEXT: br label [[OMP_IF_END]] // CHECK3: omp_if.end: -// CHECK3-NEXT: [[TMP45:%.*]] = mul nsw i32 1, [[TMP1]] -// CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP45]] +// CHECK3-NEXT: [[TMP39:%.*]] = mul nsw i32 1, [[TMP1]] +// CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP39]] // CHECK3-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1 -// CHECK3-NEXT: [[TMP46:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2 -// CHECK3-NEXT: [[CONV:%.*]] = sext i16 [[TMP46]] to i32 -// CHECK3-NEXT: [[TMP47:%.*]] = load i32, i32* [[B]], align 4 -// CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[TMP47]] -// CHECK3-NEXT: [[TMP48:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK3-NEXT: call void @llvm.stackrestore(i8* [[TMP48]]) +// CHECK3-NEXT: [[TMP40:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2 +// CHECK3-NEXT: [[CONV:%.*]] = sext i16 [[TMP40]] to i32 +// CHECK3-NEXT: [[TMP41:%.*]] = load i32, i32* [[B]], align 4 +// CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[TMP41]] +// CHECK3-NEXT: [[TMP42:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK3-NEXT: call void @llvm.stackrestore(i8* [[TMP42]]) // CHECK3-NEXT: ret i32 [[ADD3]] // // @@ -3079,7 +2992,7 @@ // CHECK3-NEXT: store i8* null, i8** [[TMP26]], align 4 // CHECK3-NEXT: [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK3-NEXT: [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l198.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 1, i32 
0) +// CHECK3-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l198.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.14, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.15, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) // CHECK3-NEXT: [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0 // CHECK3-NEXT: br i1 [[TMP30]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK3: omp_offload.failed: @@ -3147,7 +3060,7 @@ // CHECK3-NEXT: store i8* null, i8** [[TMP19]], align 4 // CHECK3-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK3-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l181.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.15, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) +// CHECK3-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l181.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.17, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.18, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) // CHECK3-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0 // CHECK3-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK3: omp_offload.failed: @@ -3184,11 +3097,11 @@ // CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[B_ADDR]], align 4 // CHECK3-NEXT: store i32 [[TMP4]], i32* [[B_CASTED]], align 4 // CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[B_CASTED]], align 4 -// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], i16* [[TMP3]]) +// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], i16* [[TMP3]]) // CHECK3-NEXT: ret void // // -// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..9 +// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..10 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i32 noundef [[B:%.*]], i32 noundef [[VLA:%.*]], i32 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR3]] { // CHECK3-NEXT: entry: // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -3254,11 +3167,11 @@ // CHECK3-NEXT: [[CONV3:%.*]] = bitcast i32* [[AAA_CASTED]] to i8* // CHECK3-NEXT: store i8 [[TMP5]], i8* [[CONV3]], align 1 // CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[AAA_CASTED]], align 4 -// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, [10 x i32]*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], [10 x i32]* [[TMP0]]) +// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, [10 x i32]*)* @.omp_outlined..13 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], [10 x i32]* [[TMP0]]) // CHECK3-NEXT: ret void // // -// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..13 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], i32 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] { // CHECK3-NEXT: entry: // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -3316,11 +3229,11 @@ // CHECK3-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16* // CHECK3-NEXT: store i16 [[TMP3]], i16* [[CONV1]], align 2 // CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4 -// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]]) +// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]]) // CHECK3-NEXT: ret void // // -// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..14 +// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..16 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] { // CHECK3-NEXT: entry: // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -3384,7 +3297,6 @@ // CHECK4-NEXT: [[DOTOFFLOAD_BASEPTRS13:%.*]] = alloca [9 x i8*], align 4 // CHECK4-NEXT: [[DOTOFFLOAD_PTRS14:%.*]] = alloca [9 x i8*], align 4 // CHECK4-NEXT: [[DOTOFFLOAD_MAPPERS15:%.*]] = alloca [9 x i8*], align 4 -// CHECK4-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [9 x i64], align 4 // CHECK4-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1:[0-9]+]]) // CHECK4-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 // CHECK4-NEXT: store i32 0, i32* [[A]], align 4 @@ -3486,96 +3398,79 @@ // CHECK4-NEXT: [[TMP51:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 0 // CHECK4-NEXT: [[TMP52:%.*]] = bitcast i8** [[TMP51]] to i32* // CHECK4-NEXT: store i32 [[TMP42]], i32* [[TMP52]], align 4 -// CHECK4-NEXT: [[TMP53:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK4-NEXT: store i64 4, i64* [[TMP53]], align 4 -// CHECK4-NEXT: [[TMP54:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 0 -// CHECK4-NEXT: store i8* null, i8** [[TMP54]], align 4 -// CHECK4-NEXT: [[TMP55:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 1 -// CHECK4-NEXT: [[TMP56:%.*]] = bitcast i8** [[TMP55]] to [10 x float]** -// CHECK4-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP56]], align 4 -// CHECK4-NEXT: [[TMP57:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 1 -// CHECK4-NEXT: [[TMP58:%.*]] = bitcast i8** [[TMP57]] to [10 x float]** -// CHECK4-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP58]], align 4 -// CHECK4-NEXT: [[TMP59:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK4-NEXT: store i64 40, i64* [[TMP59]], align 4 -// CHECK4-NEXT: [[TMP60:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 1 -// CHECK4-NEXT: store i8* null, i8** [[TMP60]], align 4 -// CHECK4-NEXT: [[TMP61:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 2 +// CHECK4-NEXT: [[TMP53:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 0 +// CHECK4-NEXT: store i8* null, i8** [[TMP53]], align 4 +// CHECK4-NEXT: [[TMP54:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 1 +// CHECK4-NEXT: [[TMP55:%.*]] = bitcast i8** [[TMP54]] to [10 x float]** +// CHECK4-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP55]], align 4 +// CHECK4-NEXT: [[TMP56:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 1 +// CHECK4-NEXT: [[TMP57:%.*]] = bitcast i8** [[TMP56]] to [10 x float]** +// CHECK4-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP57]], align 4 +// CHECK4-NEXT: [[TMP58:%.*]] = 
getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 1 +// CHECK4-NEXT: store i8* null, i8** [[TMP58]], align 4 +// CHECK4-NEXT: [[TMP59:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 2 +// CHECK4-NEXT: [[TMP60:%.*]] = bitcast i8** [[TMP59]] to i32* +// CHECK4-NEXT: store i32 [[TMP1]], i32* [[TMP60]], align 4 +// CHECK4-NEXT: [[TMP61:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 2 // CHECK4-NEXT: [[TMP62:%.*]] = bitcast i8** [[TMP61]] to i32* // CHECK4-NEXT: store i32 [[TMP1]], i32* [[TMP62]], align 4 -// CHECK4-NEXT: [[TMP63:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 2 -// CHECK4-NEXT: [[TMP64:%.*]] = bitcast i8** [[TMP63]] to i32* -// CHECK4-NEXT: store i32 [[TMP1]], i32* [[TMP64]], align 4 -// CHECK4-NEXT: [[TMP65:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK4-NEXT: store i64 4, i64* [[TMP65]], align 4 -// CHECK4-NEXT: [[TMP66:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 2 -// CHECK4-NEXT: store i8* null, i8** [[TMP66]], align 4 -// CHECK4-NEXT: [[TMP67:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 3 -// CHECK4-NEXT: [[TMP68:%.*]] = bitcast i8** [[TMP67]] to float** -// CHECK4-NEXT: store float* [[VLA]], float** [[TMP68]], align 4 -// CHECK4-NEXT: [[TMP69:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 3 -// CHECK4-NEXT: [[TMP70:%.*]] = bitcast i8** [[TMP69]] to float** -// CHECK4-NEXT: store float* [[VLA]], float** [[TMP70]], align 4 -// CHECK4-NEXT: [[TMP71:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK4-NEXT: store i64 [[TMP45]], i64* [[TMP71]], align 4 -// CHECK4-NEXT: [[TMP72:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 3 -// CHECK4-NEXT: store i8* null, i8** [[TMP72]], align 4 -// CHECK4-NEXT: [[TMP73:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 4 -// CHECK4-NEXT: [[TMP74:%.*]] = bitcast i8** [[TMP73]] to [5 x [10 x double]]** -// CHECK4-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP74]], align 4 -// CHECK4-NEXT: [[TMP75:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 4 -// CHECK4-NEXT: [[TMP76:%.*]] = bitcast i8** [[TMP75]] to [5 x [10 x double]]** -// CHECK4-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP76]], align 4 -// CHECK4-NEXT: [[TMP77:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK4-NEXT: store i64 400, i64* [[TMP77]], align 4 -// CHECK4-NEXT: [[TMP78:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 4 +// CHECK4-NEXT: [[TMP63:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 2 +// CHECK4-NEXT: store i8* null, i8** [[TMP63]], align 4 +// CHECK4-NEXT: [[TMP64:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 3 +// CHECK4-NEXT: [[TMP65:%.*]] = bitcast i8** [[TMP64]] to float** +// CHECK4-NEXT: store float* [[VLA]], float** [[TMP65]], align 4 +// CHECK4-NEXT: [[TMP66:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 3 +// CHECK4-NEXT: [[TMP67:%.*]] = bitcast i8** [[TMP66]] to float** +// CHECK4-NEXT: store float* 
[[VLA]], float** [[TMP67]], align 4 +// CHECK4-NEXT: store i64 [[TMP45]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_sizes.8, i32 0, i32 3), align 4 +// CHECK4-NEXT: [[TMP68:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 3 +// CHECK4-NEXT: store i8* null, i8** [[TMP68]], align 4 +// CHECK4-NEXT: [[TMP69:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 4 +// CHECK4-NEXT: [[TMP70:%.*]] = bitcast i8** [[TMP69]] to [5 x [10 x double]]** +// CHECK4-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP70]], align 4 +// CHECK4-NEXT: [[TMP71:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 4 +// CHECK4-NEXT: [[TMP72:%.*]] = bitcast i8** [[TMP71]] to [5 x [10 x double]]** +// CHECK4-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP72]], align 4 +// CHECK4-NEXT: [[TMP73:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 4 +// CHECK4-NEXT: store i8* null, i8** [[TMP73]], align 4 +// CHECK4-NEXT: [[TMP74:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 5 +// CHECK4-NEXT: [[TMP75:%.*]] = bitcast i8** [[TMP74]] to i32* +// CHECK4-NEXT: store i32 5, i32* [[TMP75]], align 4 +// CHECK4-NEXT: [[TMP76:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 5 +// CHECK4-NEXT: [[TMP77:%.*]] = bitcast i8** [[TMP76]] to i32* +// CHECK4-NEXT: store i32 5, i32* [[TMP77]], align 4 +// CHECK4-NEXT: [[TMP78:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 5 // CHECK4-NEXT: store i8* null, i8** [[TMP78]], align 4 -// CHECK4-NEXT: [[TMP79:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 5 +// CHECK4-NEXT: [[TMP79:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 6 // CHECK4-NEXT: [[TMP80:%.*]] = bitcast i8** [[TMP79]] to i32* -// CHECK4-NEXT: store i32 5, i32* [[TMP80]], align 4 -// CHECK4-NEXT: [[TMP81:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 5 +// CHECK4-NEXT: store i32 [[TMP3]], i32* [[TMP80]], align 4 +// CHECK4-NEXT: [[TMP81:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 6 // CHECK4-NEXT: [[TMP82:%.*]] = bitcast i8** [[TMP81]] to i32* -// CHECK4-NEXT: store i32 5, i32* [[TMP82]], align 4 -// CHECK4-NEXT: [[TMP83:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5 -// CHECK4-NEXT: store i64 4, i64* [[TMP83]], align 4 -// CHECK4-NEXT: [[TMP84:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 5 -// CHECK4-NEXT: store i8* null, i8** [[TMP84]], align 4 -// CHECK4-NEXT: [[TMP85:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 6 -// CHECK4-NEXT: [[TMP86:%.*]] = bitcast i8** [[TMP85]] to i32* -// CHECK4-NEXT: store i32 [[TMP3]], i32* [[TMP86]], align 4 -// CHECK4-NEXT: [[TMP87:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 6 -// CHECK4-NEXT: [[TMP88:%.*]] = bitcast i8** [[TMP87]] to i32* -// CHECK4-NEXT: store i32 [[TMP3]], i32* [[TMP88]], align 4 -// CHECK4-NEXT: [[TMP89:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6 -// CHECK4-NEXT: store i64 4, i64* [[TMP89]], align 4 -// CHECK4-NEXT: [[TMP90:%.*]] = getelementptr inbounds [9 x i8*], [9 x 
i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 6 -// CHECK4-NEXT: store i8* null, i8** [[TMP90]], align 4 -// CHECK4-NEXT: [[TMP91:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 7 -// CHECK4-NEXT: [[TMP92:%.*]] = bitcast i8** [[TMP91]] to double** -// CHECK4-NEXT: store double* [[VLA1]], double** [[TMP92]], align 4 -// CHECK4-NEXT: [[TMP93:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 7 -// CHECK4-NEXT: [[TMP94:%.*]] = bitcast i8** [[TMP93]] to double** -// CHECK4-NEXT: store double* [[VLA1]], double** [[TMP94]], align 4 -// CHECK4-NEXT: [[TMP95:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7 -// CHECK4-NEXT: store i64 [[TMP48]], i64* [[TMP95]], align 4 -// CHECK4-NEXT: [[TMP96:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 7 -// CHECK4-NEXT: store i8* null, i8** [[TMP96]], align 4 -// CHECK4-NEXT: [[TMP97:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 8 -// CHECK4-NEXT: [[TMP98:%.*]] = bitcast i8** [[TMP97]] to %struct.TT** -// CHECK4-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP98]], align 4 -// CHECK4-NEXT: [[TMP99:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 8 -// CHECK4-NEXT: [[TMP100:%.*]] = bitcast i8** [[TMP99]] to %struct.TT** -// CHECK4-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP100]], align 4 -// CHECK4-NEXT: [[TMP101:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 8 -// CHECK4-NEXT: store i64 12, i64* [[TMP101]], align 4 -// CHECK4-NEXT: [[TMP102:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 8 -// CHECK4-NEXT: store i8* null, i8** [[TMP102]], align 4 -// CHECK4-NEXT: [[TMP103:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 0 -// CHECK4-NEXT: [[TMP104:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 0 -// CHECK4-NEXT: [[TMP105:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK4-NEXT: [[TMP106:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l144.region_id, i32 9, i8** [[TMP103]], i8** [[TMP104]], i64* [[TMP105]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_maptypes.8, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) -// CHECK4-NEXT: [[TMP107:%.*]] = icmp ne i32 [[TMP106]], 0 -// CHECK4-NEXT: br i1 [[TMP107]], label [[OMP_OFFLOAD_FAILED16:%.*]], label [[OMP_OFFLOAD_CONT17:%.*]] +// CHECK4-NEXT: store i32 [[TMP3]], i32* [[TMP82]], align 4 +// CHECK4-NEXT: [[TMP83:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 6 +// CHECK4-NEXT: store i8* null, i8** [[TMP83]], align 4 +// CHECK4-NEXT: [[TMP84:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 7 +// CHECK4-NEXT: [[TMP85:%.*]] = bitcast i8** [[TMP84]] to double** +// CHECK4-NEXT: store double* [[VLA1]], double** [[TMP85]], align 4 +// CHECK4-NEXT: [[TMP86:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 7 +// CHECK4-NEXT: [[TMP87:%.*]] = bitcast i8** [[TMP86]] to double** +// CHECK4-NEXT: store double* [[VLA1]], double** [[TMP87]], align 4 +// CHECK4-NEXT: store i64 [[TMP48]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_sizes.8, i32 
0, i32 7), align 4 +// CHECK4-NEXT: [[TMP88:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 7 +// CHECK4-NEXT: store i8* null, i8** [[TMP88]], align 4 +// CHECK4-NEXT: [[TMP89:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 8 +// CHECK4-NEXT: [[TMP90:%.*]] = bitcast i8** [[TMP89]] to %struct.TT** +// CHECK4-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP90]], align 4 +// CHECK4-NEXT: [[TMP91:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 8 +// CHECK4-NEXT: [[TMP92:%.*]] = bitcast i8** [[TMP91]] to %struct.TT** +// CHECK4-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP92]], align 4 +// CHECK4-NEXT: [[TMP93:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 8 +// CHECK4-NEXT: store i8* null, i8** [[TMP93]], align 4 +// CHECK4-NEXT: [[TMP94:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 0 +// CHECK4-NEXT: [[TMP95:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 0 +// CHECK4-NEXT: [[TMP96:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l144.region_id, i32 9, i8** [[TMP94]], i8** [[TMP95]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) +// CHECK4-NEXT: [[TMP97:%.*]] = icmp ne i32 [[TMP96]], 0 +// CHECK4-NEXT: br i1 [[TMP97]], label [[OMP_OFFLOAD_FAILED16:%.*]], label [[OMP_OFFLOAD_CONT17:%.*]] // CHECK4: omp_offload.failed16: // CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l144(i32 [[TMP42]], [10 x float]* [[B]], i32 [[TMP1]], float* [[VLA]], [5 x [10 x double]]* [[C]], i32 5, i32 [[TMP3]], double* [[VLA1]], %struct.TT* [[D]]) #[[ATTR4]] // CHECK4-NEXT: br label [[OMP_OFFLOAD_CONT17]] @@ -3585,10 +3480,10 @@ // CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l144(i32 [[TMP42]], [10 x float]* [[B]], i32 [[TMP1]], float* [[VLA]], [5 x [10 x double]]* [[C]], i32 5, i32 [[TMP3]], double* [[VLA1]], %struct.TT* [[D]]) #[[ATTR4]] // CHECK4-NEXT: br label [[OMP_IF_END19]] // CHECK4: omp_if.end19: -// CHECK4-NEXT: [[TMP108:%.*]] = load i32, i32* [[A]], align 4 -// CHECK4-NEXT: [[TMP109:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK4-NEXT: call void @llvm.stackrestore(i8* [[TMP109]]) -// CHECK4-NEXT: ret i32 [[TMP108]] +// CHECK4-NEXT: [[TMP98:%.*]] = load i32, i32* [[A]], align 4 +// CHECK4-NEXT: [[TMP99:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK4-NEXT: call void @llvm.stackrestore(i8* [[TMP99]]) +// CHECK4-NEXT: ret i32 [[TMP98]] // // // CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l100 @@ -3917,7 +3812,6 @@ // CHECK4-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 4 // CHECK4-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 4 // CHECK4-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 4 -// CHECK4-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 4 // CHECK4-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 // CHECK4-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 // CHECK4-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 @@ -3947,56 +3841,46 @@ // CHECK4-NEXT: [[TMP12:%.*]] = getelementptr inbounds [5 x i8*], [5 x 
i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK4-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to double** // CHECK4-NEXT: store double* [[A]], double** [[TMP13]], align 4 -// CHECK4-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK4-NEXT: store i64 8, i64* [[TMP14]], align 4 -// CHECK4-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK4-NEXT: store i8* null, i8** [[TMP15]], align 4 -// CHECK4-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK4-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32* -// CHECK4-NEXT: store i32 [[TMP5]], i32* [[TMP17]], align 4 -// CHECK4-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK4-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32* -// CHECK4-NEXT: store i32 [[TMP5]], i32* [[TMP19]], align 4 -// CHECK4-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK4-NEXT: store i64 4, i64* [[TMP20]], align 4 -// CHECK4-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK4-NEXT: store i8* null, i8** [[TMP21]], align 4 -// CHECK4-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK4-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK4-NEXT: store i8* null, i8** [[TMP14]], align 4 +// CHECK4-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK4-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i32* +// CHECK4-NEXT: store i32 [[TMP5]], i32* [[TMP16]], align 4 +// CHECK4-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK4-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32* +// CHECK4-NEXT: store i32 [[TMP5]], i32* [[TMP18]], align 4 +// CHECK4-NEXT: [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK4-NEXT: store i8* null, i8** [[TMP19]], align 4 +// CHECK4-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK4-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32* +// CHECK4-NEXT: store i32 2, i32* [[TMP21]], align 4 +// CHECK4-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK4-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i32* // CHECK4-NEXT: store i32 2, i32* [[TMP23]], align 4 -// CHECK4-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK4-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i32* -// CHECK4-NEXT: store i32 2, i32* [[TMP25]], align 4 -// CHECK4-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK4-NEXT: store i64 4, i64* [[TMP26]], align 4 -// CHECK4-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK4-NEXT: store i8* null, i8** [[TMP27]], align 4 -// CHECK4-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK4-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i32* -// CHECK4-NEXT: store i32 [[TMP1]], i32* [[TMP29]], align 4 -// CHECK4-NEXT: 
[[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK4-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i32* -// CHECK4-NEXT: store i32 [[TMP1]], i32* [[TMP31]], align 4 -// CHECK4-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK4-NEXT: store i64 4, i64* [[TMP32]], align 4 -// CHECK4-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 -// CHECK4-NEXT: store i8* null, i8** [[TMP33]], align 4 -// CHECK4-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK4-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i16** -// CHECK4-NEXT: store i16* [[VLA]], i16** [[TMP35]], align 4 -// CHECK4-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK4-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i16** -// CHECK4-NEXT: store i16* [[VLA]], i16** [[TMP37]], align 4 -// CHECK4-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK4-NEXT: store i64 [[TMP9]], i64* [[TMP38]], align 4 -// CHECK4-NEXT: [[TMP39:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 -// CHECK4-NEXT: store i8* null, i8** [[TMP39]], align 4 -// CHECK4-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK4-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK4-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK4-NEXT: [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216.region_id, i32 5, i8** [[TMP40]], i8** [[TMP41]], i64* [[TMP42]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) -// CHECK4-NEXT: [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0 -// CHECK4-NEXT: br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK4-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK4-NEXT: store i8* null, i8** [[TMP24]], align 4 +// CHECK4-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK4-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i32* +// CHECK4-NEXT: store i32 [[TMP1]], i32* [[TMP26]], align 4 +// CHECK4-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK4-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i32* +// CHECK4-NEXT: store i32 [[TMP1]], i32* [[TMP28]], align 4 +// CHECK4-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 +// CHECK4-NEXT: store i8* null, i8** [[TMP29]], align 4 +// CHECK4-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK4-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i16** +// CHECK4-NEXT: store i16* [[VLA]], i16** [[TMP31]], align 4 +// CHECK4-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK4-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i16** +// CHECK4-NEXT: store i16* [[VLA]], i16** [[TMP33]], align 4 +// 
CHECK4-NEXT: store i64 [[TMP9]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.11, i32 0, i32 4), align 4 +// CHECK4-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 +// CHECK4-NEXT: store i8* null, i8** [[TMP34]], align 4 +// CHECK4-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK4-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK4-NEXT: [[TMP37:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216.region_id, i32 5, i8** [[TMP35]], i8** [[TMP36]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.11, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) +// CHECK4-NEXT: [[TMP38:%.*]] = icmp ne i32 [[TMP37]], 0 +// CHECK4-NEXT: br i1 [[TMP38]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK4: omp_offload.failed: // CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216(%struct.S1* [[THIS1]], i32 [[TMP5]], i32 2, i32 [[TMP1]], i16* [[VLA]]) #[[ATTR4]] // CHECK4-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -4006,15 +3890,15 @@ // CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216(%struct.S1* [[THIS1]], i32 [[TMP5]], i32 2, i32 [[TMP1]], i16* [[VLA]]) #[[ATTR4]] // CHECK4-NEXT: br label [[OMP_IF_END]] // CHECK4: omp_if.end: -// CHECK4-NEXT: [[TMP45:%.*]] = mul nsw i32 1, [[TMP1]] -// CHECK4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP45]] +// CHECK4-NEXT: [[TMP39:%.*]] = mul nsw i32 1, [[TMP1]] +// CHECK4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP39]] // CHECK4-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1 -// CHECK4-NEXT: [[TMP46:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2 -// CHECK4-NEXT: [[CONV:%.*]] = sext i16 [[TMP46]] to i32 -// CHECK4-NEXT: [[TMP47:%.*]] = load i32, i32* [[B]], align 4 -// CHECK4-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[TMP47]] -// CHECK4-NEXT: [[TMP48:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK4-NEXT: call void @llvm.stackrestore(i8* [[TMP48]]) +// CHECK4-NEXT: [[TMP40:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2 +// CHECK4-NEXT: [[CONV:%.*]] = sext i16 [[TMP40]] to i32 +// CHECK4-NEXT: [[TMP41:%.*]] = load i32, i32* [[B]], align 4 +// CHECK4-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[TMP41]] +// CHECK4-NEXT: [[TMP42:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK4-NEXT: call void @llvm.stackrestore(i8* [[TMP42]]) // CHECK4-NEXT: ret i32 [[ADD3]] // // @@ -4085,7 +3969,7 @@ // CHECK4-NEXT: store i8* null, i8** [[TMP26]], align 4 // CHECK4-NEXT: [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK4-NEXT: [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK4-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l198.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 1, i32 
0) +// CHECK4-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l198.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.14, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.15, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) // CHECK4-NEXT: [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0 // CHECK4-NEXT: br i1 [[TMP30]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK4: omp_offload.failed: @@ -4153,7 +4037,7 @@ // CHECK4-NEXT: store i8* null, i8** [[TMP19]], align 4 // CHECK4-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK4-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK4-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l181.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.15, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) +// CHECK4-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l181.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.17, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.18, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) // CHECK4-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0 // CHECK4-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK4: omp_offload.failed: @@ -4190,11 +4074,11 @@ // CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[B_ADDR]], align 4 // CHECK4-NEXT: store i32 [[TMP4]], i32* [[B_CASTED]], align 4 // CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[B_CASTED]], align 4 -// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], i16* [[TMP3]]) +// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], i16* [[TMP3]]) // CHECK4-NEXT: ret void // // -// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..9 +// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..10 // CHECK4-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i32 noundef [[B:%.*]], i32 noundef [[VLA:%.*]], i32 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR3]] { // CHECK4-NEXT: entry: // CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -4260,11 +4144,11 @@ // CHECK4-NEXT: [[CONV3:%.*]] = bitcast i32* [[AAA_CASTED]] to i8* // CHECK4-NEXT: store i8 [[TMP5]], i8* [[CONV3]], align 1 // CHECK4-NEXT: [[TMP6:%.*]] = load i32, i32* [[AAA_CASTED]], align 4 -// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, [10 x i32]*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], [10 x i32]* [[TMP0]]) +// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, [10 x i32]*)* @.omp_outlined..13 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], [10 x i32]* [[TMP0]]) // CHECK4-NEXT: ret void // // -// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..13 // CHECK4-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], i32 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] { // CHECK4-NEXT: entry: // CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -4322,11 +4206,11 @@ // CHECK4-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16* // CHECK4-NEXT: store i16 [[TMP3]], i16* [[CONV1]], align 2 // CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4 -// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]]) +// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]]) // CHECK4-NEXT: ret void // // -// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..14 +// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..16 // CHECK4-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] { // CHECK4-NEXT: entry: // CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -6016,7 +5900,6 @@ // CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS16:%.*]] = alloca [9 x i8*], align 8 // CHECK17-NEXT: [[DOTOFFLOAD_PTRS17:%.*]] = alloca [9 x i8*], align 8 // CHECK17-NEXT: [[DOTOFFLOAD_MAPPERS18:%.*]] = alloca [9 x i8*], align 8 -// CHECK17-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [9 x i64], align 8 // CHECK17-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1:[0-9]+]]) // CHECK17-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 // CHECK17-NEXT: store i32 0, i32* [[A]], align 4 @@ -6121,96 +6004,79 @@ // CHECK17-NEXT: [[TMP51:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 0 // CHECK17-NEXT: [[TMP52:%.*]] = bitcast i8** [[TMP51]] to i64* // CHECK17-NEXT: store i64 [[TMP44]], i64* [[TMP52]], align 8 -// CHECK17-NEXT: [[TMP53:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK17-NEXT: store i64 4, i64* [[TMP53]], align 8 -// CHECK17-NEXT: [[TMP54:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 0 -// CHECK17-NEXT: store i8* null, i8** [[TMP54]], align 8 -// CHECK17-NEXT: [[TMP55:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 1 -// CHECK17-NEXT: [[TMP56:%.*]] = bitcast i8** [[TMP55]] to [10 x float]** -// CHECK17-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP56]], align 8 -// CHECK17-NEXT: [[TMP57:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 1 -// CHECK17-NEXT: [[TMP58:%.*]] = bitcast i8** [[TMP57]] to [10 x float]** -// CHECK17-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP58]], align 8 -// CHECK17-NEXT: [[TMP59:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK17-NEXT: store i64 40, i64* [[TMP59]], align 8 -// CHECK17-NEXT: [[TMP60:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 1 -// CHECK17-NEXT: store i8* null, i8** [[TMP60]], align 8 -// CHECK17-NEXT: [[TMP61:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 2 +// CHECK17-NEXT: [[TMP53:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 0 +// CHECK17-NEXT: store i8* null, i8** [[TMP53]], align 8 +// CHECK17-NEXT: [[TMP54:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 1 +// CHECK17-NEXT: [[TMP55:%.*]] = bitcast i8** [[TMP54]] to [10 x float]** +// CHECK17-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP55]], align 8 +// CHECK17-NEXT: [[TMP56:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 1 +// CHECK17-NEXT: [[TMP57:%.*]] = bitcast i8** [[TMP56]] to [10 x float]** +// CHECK17-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP57]], align 8 +// 
CHECK17-NEXT: [[TMP58:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 1 +// CHECK17-NEXT: store i8* null, i8** [[TMP58]], align 8 +// CHECK17-NEXT: [[TMP59:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 2 +// CHECK17-NEXT: [[TMP60:%.*]] = bitcast i8** [[TMP59]] to i64* +// CHECK17-NEXT: store i64 [[TMP2]], i64* [[TMP60]], align 8 +// CHECK17-NEXT: [[TMP61:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 2 // CHECK17-NEXT: [[TMP62:%.*]] = bitcast i8** [[TMP61]] to i64* // CHECK17-NEXT: store i64 [[TMP2]], i64* [[TMP62]], align 8 -// CHECK17-NEXT: [[TMP63:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 2 -// CHECK17-NEXT: [[TMP64:%.*]] = bitcast i8** [[TMP63]] to i64* -// CHECK17-NEXT: store i64 [[TMP2]], i64* [[TMP64]], align 8 -// CHECK17-NEXT: [[TMP65:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK17-NEXT: store i64 8, i64* [[TMP65]], align 8 -// CHECK17-NEXT: [[TMP66:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 2 -// CHECK17-NEXT: store i8* null, i8** [[TMP66]], align 8 -// CHECK17-NEXT: [[TMP67:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 3 -// CHECK17-NEXT: [[TMP68:%.*]] = bitcast i8** [[TMP67]] to float** -// CHECK17-NEXT: store float* [[VLA]], float** [[TMP68]], align 8 -// CHECK17-NEXT: [[TMP69:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 3 -// CHECK17-NEXT: [[TMP70:%.*]] = bitcast i8** [[TMP69]] to float** -// CHECK17-NEXT: store float* [[VLA]], float** [[TMP70]], align 8 -// CHECK17-NEXT: [[TMP71:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK17-NEXT: store i64 [[TMP46]], i64* [[TMP71]], align 8 -// CHECK17-NEXT: [[TMP72:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 3 -// CHECK17-NEXT: store i8* null, i8** [[TMP72]], align 8 -// CHECK17-NEXT: [[TMP73:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 4 -// CHECK17-NEXT: [[TMP74:%.*]] = bitcast i8** [[TMP73]] to [5 x [10 x double]]** -// CHECK17-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP74]], align 8 -// CHECK17-NEXT: [[TMP75:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 4 -// CHECK17-NEXT: [[TMP76:%.*]] = bitcast i8** [[TMP75]] to [5 x [10 x double]]** -// CHECK17-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP76]], align 8 -// CHECK17-NEXT: [[TMP77:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK17-NEXT: store i64 400, i64* [[TMP77]], align 8 -// CHECK17-NEXT: [[TMP78:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 4 +// CHECK17-NEXT: [[TMP63:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 2 +// CHECK17-NEXT: store i8* null, i8** [[TMP63]], align 8 +// CHECK17-NEXT: [[TMP64:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 3 +// CHECK17-NEXT: [[TMP65:%.*]] = bitcast i8** [[TMP64]] to float** +// CHECK17-NEXT: store float* [[VLA]], float** [[TMP65]], align 8 +// CHECK17-NEXT: [[TMP66:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 3 +// CHECK17-NEXT: [[TMP67:%.*]] = 
bitcast i8** [[TMP66]] to float** +// CHECK17-NEXT: store float* [[VLA]], float** [[TMP67]], align 8 +// CHECK17-NEXT: store i64 [[TMP46]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_sizes.8, i32 0, i32 3), align 8 +// CHECK17-NEXT: [[TMP68:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 3 +// CHECK17-NEXT: store i8* null, i8** [[TMP68]], align 8 +// CHECK17-NEXT: [[TMP69:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 4 +// CHECK17-NEXT: [[TMP70:%.*]] = bitcast i8** [[TMP69]] to [5 x [10 x double]]** +// CHECK17-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP70]], align 8 +// CHECK17-NEXT: [[TMP71:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 4 +// CHECK17-NEXT: [[TMP72:%.*]] = bitcast i8** [[TMP71]] to [5 x [10 x double]]** +// CHECK17-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP72]], align 8 +// CHECK17-NEXT: [[TMP73:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 4 +// CHECK17-NEXT: store i8* null, i8** [[TMP73]], align 8 +// CHECK17-NEXT: [[TMP74:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 5 +// CHECK17-NEXT: [[TMP75:%.*]] = bitcast i8** [[TMP74]] to i64* +// CHECK17-NEXT: store i64 5, i64* [[TMP75]], align 8 +// CHECK17-NEXT: [[TMP76:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 5 +// CHECK17-NEXT: [[TMP77:%.*]] = bitcast i8** [[TMP76]] to i64* +// CHECK17-NEXT: store i64 5, i64* [[TMP77]], align 8 +// CHECK17-NEXT: [[TMP78:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 5 // CHECK17-NEXT: store i8* null, i8** [[TMP78]], align 8 -// CHECK17-NEXT: [[TMP79:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 5 +// CHECK17-NEXT: [[TMP79:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 6 // CHECK17-NEXT: [[TMP80:%.*]] = bitcast i8** [[TMP79]] to i64* -// CHECK17-NEXT: store i64 5, i64* [[TMP80]], align 8 -// CHECK17-NEXT: [[TMP81:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 5 +// CHECK17-NEXT: store i64 [[TMP5]], i64* [[TMP80]], align 8 +// CHECK17-NEXT: [[TMP81:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 6 // CHECK17-NEXT: [[TMP82:%.*]] = bitcast i8** [[TMP81]] to i64* -// CHECK17-NEXT: store i64 5, i64* [[TMP82]], align 8 -// CHECK17-NEXT: [[TMP83:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5 -// CHECK17-NEXT: store i64 8, i64* [[TMP83]], align 8 -// CHECK17-NEXT: [[TMP84:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 5 -// CHECK17-NEXT: store i8* null, i8** [[TMP84]], align 8 -// CHECK17-NEXT: [[TMP85:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 6 -// CHECK17-NEXT: [[TMP86:%.*]] = bitcast i8** [[TMP85]] to i64* -// CHECK17-NEXT: store i64 [[TMP5]], i64* [[TMP86]], align 8 -// CHECK17-NEXT: [[TMP87:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 6 -// CHECK17-NEXT: [[TMP88:%.*]] = bitcast i8** [[TMP87]] to i64* -// CHECK17-NEXT: store i64 [[TMP5]], i64* [[TMP88]], align 8 -// CHECK17-NEXT: [[TMP89:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6 -// CHECK17-NEXT: 
store i64 8, i64* [[TMP89]], align 8 -// CHECK17-NEXT: [[TMP90:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 6 -// CHECK17-NEXT: store i8* null, i8** [[TMP90]], align 8 -// CHECK17-NEXT: [[TMP91:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 7 -// CHECK17-NEXT: [[TMP92:%.*]] = bitcast i8** [[TMP91]] to double** -// CHECK17-NEXT: store double* [[VLA1]], double** [[TMP92]], align 8 -// CHECK17-NEXT: [[TMP93:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 7 -// CHECK17-NEXT: [[TMP94:%.*]] = bitcast i8** [[TMP93]] to double** -// CHECK17-NEXT: store double* [[VLA1]], double** [[TMP94]], align 8 -// CHECK17-NEXT: [[TMP95:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7 -// CHECK17-NEXT: store i64 [[TMP48]], i64* [[TMP95]], align 8 -// CHECK17-NEXT: [[TMP96:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 7 -// CHECK17-NEXT: store i8* null, i8** [[TMP96]], align 8 -// CHECK17-NEXT: [[TMP97:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 8 -// CHECK17-NEXT: [[TMP98:%.*]] = bitcast i8** [[TMP97]] to %struct.TT** -// CHECK17-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP98]], align 8 -// CHECK17-NEXT: [[TMP99:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 8 -// CHECK17-NEXT: [[TMP100:%.*]] = bitcast i8** [[TMP99]] to %struct.TT** -// CHECK17-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP100]], align 8 -// CHECK17-NEXT: [[TMP101:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 8 -// CHECK17-NEXT: store i64 16, i64* [[TMP101]], align 8 -// CHECK17-NEXT: [[TMP102:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 8 -// CHECK17-NEXT: store i8* null, i8** [[TMP102]], align 8 -// CHECK17-NEXT: [[TMP103:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP104:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP105:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP106:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l144.region_id, i32 9, i8** [[TMP103]], i8** [[TMP104]], i64* [[TMP105]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_maptypes.8, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) -// CHECK17-NEXT: [[TMP107:%.*]] = icmp ne i32 [[TMP106]], 0 -// CHECK17-NEXT: br i1 [[TMP107]], label [[OMP_OFFLOAD_FAILED19:%.*]], label [[OMP_OFFLOAD_CONT20:%.*]] +// CHECK17-NEXT: store i64 [[TMP5]], i64* [[TMP82]], align 8 +// CHECK17-NEXT: [[TMP83:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 6 +// CHECK17-NEXT: store i8* null, i8** [[TMP83]], align 8 +// CHECK17-NEXT: [[TMP84:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 7 +// CHECK17-NEXT: [[TMP85:%.*]] = bitcast i8** [[TMP84]] to double** +// CHECK17-NEXT: store double* [[VLA1]], double** [[TMP85]], align 8 +// CHECK17-NEXT: [[TMP86:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 7 +// CHECK17-NEXT: [[TMP87:%.*]] = bitcast i8** [[TMP86]] to double** +// CHECK17-NEXT: store double* 
[[VLA1]], double** [[TMP87]], align 8 +// CHECK17-NEXT: store i64 [[TMP48]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_sizes.8, i32 0, i32 7), align 8 +// CHECK17-NEXT: [[TMP88:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 7 +// CHECK17-NEXT: store i8* null, i8** [[TMP88]], align 8 +// CHECK17-NEXT: [[TMP89:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 8 +// CHECK17-NEXT: [[TMP90:%.*]] = bitcast i8** [[TMP89]] to %struct.TT** +// CHECK17-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP90]], align 8 +// CHECK17-NEXT: [[TMP91:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 8 +// CHECK17-NEXT: [[TMP92:%.*]] = bitcast i8** [[TMP91]] to %struct.TT** +// CHECK17-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP92]], align 8 +// CHECK17-NEXT: [[TMP93:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 8 +// CHECK17-NEXT: store i8* null, i8** [[TMP93]], align 8 +// CHECK17-NEXT: [[TMP94:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP95:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP96:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l144.region_id, i32 9, i8** [[TMP94]], i8** [[TMP95]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) +// CHECK17-NEXT: [[TMP97:%.*]] = icmp ne i32 [[TMP96]], 0 +// CHECK17-NEXT: br i1 [[TMP97]], label [[OMP_OFFLOAD_FAILED19:%.*]], label [[OMP_OFFLOAD_CONT20:%.*]] // CHECK17: omp_offload.failed19: // CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l144(i64 [[TMP44]], [10 x float]* [[B]], i64 [[TMP2]], float* [[VLA]], [5 x [10 x double]]* [[C]], i64 5, i64 [[TMP5]], double* [[VLA1]], %struct.TT* [[D]]) #[[ATTR4]] // CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT20]] @@ -6220,10 +6086,10 @@ // CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l144(i64 [[TMP44]], [10 x float]* [[B]], i64 [[TMP2]], float* [[VLA]], [5 x [10 x double]]* [[C]], i64 5, i64 [[TMP5]], double* [[VLA1]], %struct.TT* [[D]]) #[[ATTR4]] // CHECK17-NEXT: br label [[OMP_IF_END22]] // CHECK17: omp_if.end22: -// CHECK17-NEXT: [[TMP108:%.*]] = load i32, i32* [[A]], align 4 -// CHECK17-NEXT: [[TMP109:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK17-NEXT: call void @llvm.stackrestore(i8* [[TMP109]]) -// CHECK17-NEXT: ret i32 [[TMP108]] +// CHECK17-NEXT: [[TMP98:%.*]] = load i32, i32* [[A]], align 4 +// CHECK17-NEXT: [[TMP99:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK17-NEXT: call void @llvm.stackrestore(i8* [[TMP99]]) +// CHECK17-NEXT: ret i32 [[TMP98]] // // // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l100 @@ -6561,7 +6427,6 @@ // CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 8 // CHECK17-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 8 // CHECK17-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 8 -// CHECK17-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 8 // CHECK17-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 // CHECK17-NEXT: store i32 [[N]], i32* [[N_ADDR]], 
align 4 // CHECK17-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8 @@ -6592,56 +6457,46 @@ // CHECK17-NEXT: [[TMP12:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK17-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to double** // CHECK17-NEXT: store double* [[A]], double** [[TMP13]], align 8 -// CHECK17-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK17-NEXT: store i64 8, i64* [[TMP14]], align 8 -// CHECK17-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK17-NEXT: store i8* null, i8** [[TMP15]], align 8 -// CHECK17-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK17-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i64* -// CHECK17-NEXT: store i64 [[TMP6]], i64* [[TMP17]], align 8 -// CHECK17-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK17-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i64* -// CHECK17-NEXT: store i64 [[TMP6]], i64* [[TMP19]], align 8 -// CHECK17-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK17-NEXT: store i64 4, i64* [[TMP20]], align 8 -// CHECK17-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK17-NEXT: store i8* null, i8** [[TMP21]], align 8 -// CHECK17-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK17-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK17-NEXT: store i8* null, i8** [[TMP14]], align 8 +// CHECK17-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK17-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i64* +// CHECK17-NEXT: store i64 [[TMP6]], i64* [[TMP16]], align 8 +// CHECK17-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK17-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i64* +// CHECK17-NEXT: store i64 [[TMP6]], i64* [[TMP18]], align 8 +// CHECK17-NEXT: [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK17-NEXT: store i8* null, i8** [[TMP19]], align 8 +// CHECK17-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK17-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i64* +// CHECK17-NEXT: store i64 2, i64* [[TMP21]], align 8 +// CHECK17-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK17-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i64* // CHECK17-NEXT: store i64 2, i64* [[TMP23]], align 8 -// CHECK17-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK17-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i64* -// CHECK17-NEXT: store i64 2, i64* [[TMP25]], align 8 -// CHECK17-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK17-NEXT: store i64 8, i64* [[TMP26]], align 8 -// CHECK17-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK17-NEXT: store i8* null, i8** [[TMP27]], align 8 -// CHECK17-NEXT: 
[[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK17-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i64* -// CHECK17-NEXT: store i64 [[TMP2]], i64* [[TMP29]], align 8 -// CHECK17-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK17-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i64* -// CHECK17-NEXT: store i64 [[TMP2]], i64* [[TMP31]], align 8 -// CHECK17-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK17-NEXT: store i64 8, i64* [[TMP32]], align 8 -// CHECK17-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 -// CHECK17-NEXT: store i8* null, i8** [[TMP33]], align 8 -// CHECK17-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK17-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i16** -// CHECK17-NEXT: store i16* [[VLA]], i16** [[TMP35]], align 8 -// CHECK17-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK17-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i16** -// CHECK17-NEXT: store i16* [[VLA]], i16** [[TMP37]], align 8 -// CHECK17-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK17-NEXT: store i64 [[TMP9]], i64* [[TMP38]], align 8 -// CHECK17-NEXT: [[TMP39:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 -// CHECK17-NEXT: store i8* null, i8** [[TMP39]], align 8 -// CHECK17-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216.region_id, i32 5, i8** [[TMP40]], i8** [[TMP41]], i64* [[TMP42]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) -// CHECK17-NEXT: [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0 -// CHECK17-NEXT: br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK17-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK17-NEXT: store i8* null, i8** [[TMP24]], align 8 +// CHECK17-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK17-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i64* +// CHECK17-NEXT: store i64 [[TMP2]], i64* [[TMP26]], align 8 +// CHECK17-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK17-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i64* +// CHECK17-NEXT: store i64 [[TMP2]], i64* [[TMP28]], align 8 +// CHECK17-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 +// CHECK17-NEXT: store i8* null, i8** [[TMP29]], align 8 +// CHECK17-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK17-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i16** +// CHECK17-NEXT: store 
i16* [[VLA]], i16** [[TMP31]], align 8 +// CHECK17-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK17-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i16** +// CHECK17-NEXT: store i16* [[VLA]], i16** [[TMP33]], align 8 +// CHECK17-NEXT: store i64 [[TMP9]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.11, i32 0, i32 4), align 8 +// CHECK17-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 +// CHECK17-NEXT: store i8* null, i8** [[TMP34]], align 8 +// CHECK17-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP37:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216.region_id, i32 5, i8** [[TMP35]], i8** [[TMP36]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.11, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) +// CHECK17-NEXT: [[TMP38:%.*]] = icmp ne i32 [[TMP37]], 0 +// CHECK17-NEXT: br i1 [[TMP38]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK17: omp_offload.failed: // CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216(%struct.S1* [[THIS1]], i64 [[TMP6]], i64 2, i64 [[TMP2]], i16* [[VLA]]) #[[ATTR4]] // CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -6651,15 +6506,15 @@ // CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216(%struct.S1* [[THIS1]], i64 [[TMP6]], i64 2, i64 [[TMP2]], i16* [[VLA]]) #[[ATTR4]] // CHECK17-NEXT: br label [[OMP_IF_END]] // CHECK17: omp_if.end: -// CHECK17-NEXT: [[TMP45:%.*]] = mul nsw i64 1, [[TMP2]] -// CHECK17-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP45]] +// CHECK17-NEXT: [[TMP39:%.*]] = mul nsw i64 1, [[TMP2]] +// CHECK17-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP39]] // CHECK17-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1 -// CHECK17-NEXT: [[TMP46:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2 -// CHECK17-NEXT: [[CONV3:%.*]] = sext i16 [[TMP46]] to i32 -// CHECK17-NEXT: [[TMP47:%.*]] = load i32, i32* [[B]], align 4 -// CHECK17-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], [[TMP47]] -// CHECK17-NEXT: [[TMP48:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK17-NEXT: call void @llvm.stackrestore(i8* [[TMP48]]) +// CHECK17-NEXT: [[TMP40:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2 +// CHECK17-NEXT: [[CONV3:%.*]] = sext i16 [[TMP40]] to i32 +// CHECK17-NEXT: [[TMP41:%.*]] = load i32, i32* [[B]], align 4 +// CHECK17-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], [[TMP41]] +// CHECK17-NEXT: [[TMP42:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK17-NEXT: call void @llvm.stackrestore(i8* [[TMP42]]) // CHECK17-NEXT: ret i32 [[ADD4]] // // @@ -6731,7 +6586,7 @@ // CHECK17-NEXT: store i8* null, i8** [[TMP26]], align 8 // CHECK17-NEXT: [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK17-NEXT: [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 
-1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l198.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) +// CHECK17-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l198.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.14, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.15, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) // CHECK17-NEXT: [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0 // CHECK17-NEXT: br i1 [[TMP30]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK17: omp_offload.failed: @@ -6800,7 +6655,7 @@ // CHECK17-NEXT: store i8* null, i8** [[TMP19]], align 8 // CHECK17-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK17-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l181.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.15, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) +// CHECK17-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l181.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.17, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.18, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) // CHECK17-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0 // CHECK17-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK17: omp_offload.failed: @@ -6839,11 +6694,11 @@ // CHECK17-NEXT: [[CONV3:%.*]] = bitcast i64* [[B_CASTED]] to i32* // CHECK17-NEXT: store i32 [[TMP4]], i32* [[CONV3]], align 4 // CHECK17-NEXT: [[TMP5:%.*]] = load i64, i64* [[B_CASTED]], align 8 -// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], i16* [[TMP3]]) +// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], i16* [[TMP3]]) // CHECK17-NEXT: ret void // // -// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..9 +// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..10 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i64 noundef [[B:%.*]], i64 noundef [[VLA:%.*]], i64 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR3]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -6912,11 +6767,11 @@ // CHECK17-NEXT: [[CONV5:%.*]] = bitcast i64* [[AAA_CASTED]] to i8* // CHECK17-NEXT: store i8 [[TMP5]], i8* [[CONV5]], align 1 // CHECK17-NEXT: [[TMP6:%.*]] = load i64, i64* [[AAA_CASTED]], align 8 -// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, [10 x i32]*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], [10 x i32]* [[TMP0]]) +// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, [10 x i32]*)* @.omp_outlined..13 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], [10 x i32]* [[TMP0]]) // CHECK17-NEXT: ret void // // -// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..13 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], i64 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -6977,11 +6832,11 @@ // CHECK17-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16* // CHECK17-NEXT: store i16 [[TMP3]], i16* [[CONV3]], align 2 // CHECK17-NEXT: [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8 -// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]]) +// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]]) // CHECK17-NEXT: ret void // // -// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..14 +// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..16 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -7046,7 +6901,6 @@ // CHECK18-NEXT: [[DOTOFFLOAD_BASEPTRS16:%.*]] = alloca [9 x i8*], align 8 // CHECK18-NEXT: [[DOTOFFLOAD_PTRS17:%.*]] = alloca [9 x i8*], align 8 // CHECK18-NEXT: [[DOTOFFLOAD_MAPPERS18:%.*]] = alloca [9 x i8*], align 8 -// CHECK18-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [9 x i64], align 8 // CHECK18-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1:[0-9]+]]) // CHECK18-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 // CHECK18-NEXT: store i32 0, i32* [[A]], align 4 @@ -7151,96 +7005,79 @@ // CHECK18-NEXT: [[TMP51:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 0 // CHECK18-NEXT: [[TMP52:%.*]] = bitcast i8** [[TMP51]] to i64* // CHECK18-NEXT: store i64 [[TMP44]], i64* [[TMP52]], align 8 -// CHECK18-NEXT: [[TMP53:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK18-NEXT: store i64 4, i64* [[TMP53]], align 8 -// CHECK18-NEXT: [[TMP54:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 0 -// CHECK18-NEXT: store i8* null, i8** [[TMP54]], align 8 -// CHECK18-NEXT: [[TMP55:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 1 -// CHECK18-NEXT: [[TMP56:%.*]] = bitcast i8** [[TMP55]] to [10 x float]** -// CHECK18-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP56]], align 8 -// CHECK18-NEXT: [[TMP57:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 1 -// CHECK18-NEXT: [[TMP58:%.*]] = bitcast i8** [[TMP57]] to [10 x float]** -// CHECK18-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP58]], align 8 -// CHECK18-NEXT: [[TMP59:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK18-NEXT: store i64 40, i64* [[TMP59]], align 8 -// CHECK18-NEXT: [[TMP60:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 1 -// CHECK18-NEXT: store i8* null, i8** [[TMP60]], align 8 -// CHECK18-NEXT: [[TMP61:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 2 +// CHECK18-NEXT: [[TMP53:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 0 +// CHECK18-NEXT: store i8* null, i8** [[TMP53]], align 8 +// CHECK18-NEXT: [[TMP54:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 1 +// CHECK18-NEXT: [[TMP55:%.*]] = bitcast i8** [[TMP54]] to [10 x float]** +// CHECK18-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP55]], align 8 +// CHECK18-NEXT: [[TMP56:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 1 +// CHECK18-NEXT: [[TMP57:%.*]] = bitcast i8** [[TMP56]] to [10 x float]** +// CHECK18-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP57]], align 8 
+// CHECK18-NEXT: [[TMP58:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 1 +// CHECK18-NEXT: store i8* null, i8** [[TMP58]], align 8 +// CHECK18-NEXT: [[TMP59:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 2 +// CHECK18-NEXT: [[TMP60:%.*]] = bitcast i8** [[TMP59]] to i64* +// CHECK18-NEXT: store i64 [[TMP2]], i64* [[TMP60]], align 8 +// CHECK18-NEXT: [[TMP61:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 2 // CHECK18-NEXT: [[TMP62:%.*]] = bitcast i8** [[TMP61]] to i64* // CHECK18-NEXT: store i64 [[TMP2]], i64* [[TMP62]], align 8 -// CHECK18-NEXT: [[TMP63:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 2 -// CHECK18-NEXT: [[TMP64:%.*]] = bitcast i8** [[TMP63]] to i64* -// CHECK18-NEXT: store i64 [[TMP2]], i64* [[TMP64]], align 8 -// CHECK18-NEXT: [[TMP65:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK18-NEXT: store i64 8, i64* [[TMP65]], align 8 -// CHECK18-NEXT: [[TMP66:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 2 -// CHECK18-NEXT: store i8* null, i8** [[TMP66]], align 8 -// CHECK18-NEXT: [[TMP67:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 3 -// CHECK18-NEXT: [[TMP68:%.*]] = bitcast i8** [[TMP67]] to float** -// CHECK18-NEXT: store float* [[VLA]], float** [[TMP68]], align 8 -// CHECK18-NEXT: [[TMP69:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 3 -// CHECK18-NEXT: [[TMP70:%.*]] = bitcast i8** [[TMP69]] to float** -// CHECK18-NEXT: store float* [[VLA]], float** [[TMP70]], align 8 -// CHECK18-NEXT: [[TMP71:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK18-NEXT: store i64 [[TMP46]], i64* [[TMP71]], align 8 -// CHECK18-NEXT: [[TMP72:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 3 -// CHECK18-NEXT: store i8* null, i8** [[TMP72]], align 8 -// CHECK18-NEXT: [[TMP73:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 4 -// CHECK18-NEXT: [[TMP74:%.*]] = bitcast i8** [[TMP73]] to [5 x [10 x double]]** -// CHECK18-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP74]], align 8 -// CHECK18-NEXT: [[TMP75:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 4 -// CHECK18-NEXT: [[TMP76:%.*]] = bitcast i8** [[TMP75]] to [5 x [10 x double]]** -// CHECK18-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP76]], align 8 -// CHECK18-NEXT: [[TMP77:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK18-NEXT: store i64 400, i64* [[TMP77]], align 8 -// CHECK18-NEXT: [[TMP78:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 4 +// CHECK18-NEXT: [[TMP63:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 2 +// CHECK18-NEXT: store i8* null, i8** [[TMP63]], align 8 +// CHECK18-NEXT: [[TMP64:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 3 +// CHECK18-NEXT: [[TMP65:%.*]] = bitcast i8** [[TMP64]] to float** +// CHECK18-NEXT: store float* [[VLA]], float** [[TMP65]], align 8 +// CHECK18-NEXT: [[TMP66:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 3 +// CHECK18-NEXT: 
[[TMP67:%.*]] = bitcast i8** [[TMP66]] to float** +// CHECK18-NEXT: store float* [[VLA]], float** [[TMP67]], align 8 +// CHECK18-NEXT: store i64 [[TMP46]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_sizes.8, i32 0, i32 3), align 8 +// CHECK18-NEXT: [[TMP68:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 3 +// CHECK18-NEXT: store i8* null, i8** [[TMP68]], align 8 +// CHECK18-NEXT: [[TMP69:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 4 +// CHECK18-NEXT: [[TMP70:%.*]] = bitcast i8** [[TMP69]] to [5 x [10 x double]]** +// CHECK18-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP70]], align 8 +// CHECK18-NEXT: [[TMP71:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 4 +// CHECK18-NEXT: [[TMP72:%.*]] = bitcast i8** [[TMP71]] to [5 x [10 x double]]** +// CHECK18-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP72]], align 8 +// CHECK18-NEXT: [[TMP73:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 4 +// CHECK18-NEXT: store i8* null, i8** [[TMP73]], align 8 +// CHECK18-NEXT: [[TMP74:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 5 +// CHECK18-NEXT: [[TMP75:%.*]] = bitcast i8** [[TMP74]] to i64* +// CHECK18-NEXT: store i64 5, i64* [[TMP75]], align 8 +// CHECK18-NEXT: [[TMP76:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 5 +// CHECK18-NEXT: [[TMP77:%.*]] = bitcast i8** [[TMP76]] to i64* +// CHECK18-NEXT: store i64 5, i64* [[TMP77]], align 8 +// CHECK18-NEXT: [[TMP78:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 5 // CHECK18-NEXT: store i8* null, i8** [[TMP78]], align 8 -// CHECK18-NEXT: [[TMP79:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 5 +// CHECK18-NEXT: [[TMP79:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 6 // CHECK18-NEXT: [[TMP80:%.*]] = bitcast i8** [[TMP79]] to i64* -// CHECK18-NEXT: store i64 5, i64* [[TMP80]], align 8 -// CHECK18-NEXT: [[TMP81:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 5 +// CHECK18-NEXT: store i64 [[TMP5]], i64* [[TMP80]], align 8 +// CHECK18-NEXT: [[TMP81:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 6 // CHECK18-NEXT: [[TMP82:%.*]] = bitcast i8** [[TMP81]] to i64* -// CHECK18-NEXT: store i64 5, i64* [[TMP82]], align 8 -// CHECK18-NEXT: [[TMP83:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5 -// CHECK18-NEXT: store i64 8, i64* [[TMP83]], align 8 -// CHECK18-NEXT: [[TMP84:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 5 -// CHECK18-NEXT: store i8* null, i8** [[TMP84]], align 8 -// CHECK18-NEXT: [[TMP85:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 6 -// CHECK18-NEXT: [[TMP86:%.*]] = bitcast i8** [[TMP85]] to i64* -// CHECK18-NEXT: store i64 [[TMP5]], i64* [[TMP86]], align 8 -// CHECK18-NEXT: [[TMP87:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 6 -// CHECK18-NEXT: [[TMP88:%.*]] = bitcast i8** [[TMP87]] to i64* -// CHECK18-NEXT: store i64 [[TMP5]], i64* [[TMP88]], align 8 -// CHECK18-NEXT: [[TMP89:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6 -// 
CHECK18-NEXT: store i64 8, i64* [[TMP89]], align 8 -// CHECK18-NEXT: [[TMP90:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 6 -// CHECK18-NEXT: store i8* null, i8** [[TMP90]], align 8 -// CHECK18-NEXT: [[TMP91:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 7 -// CHECK18-NEXT: [[TMP92:%.*]] = bitcast i8** [[TMP91]] to double** -// CHECK18-NEXT: store double* [[VLA1]], double** [[TMP92]], align 8 -// CHECK18-NEXT: [[TMP93:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 7 -// CHECK18-NEXT: [[TMP94:%.*]] = bitcast i8** [[TMP93]] to double** -// CHECK18-NEXT: store double* [[VLA1]], double** [[TMP94]], align 8 -// CHECK18-NEXT: [[TMP95:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7 -// CHECK18-NEXT: store i64 [[TMP48]], i64* [[TMP95]], align 8 -// CHECK18-NEXT: [[TMP96:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 7 -// CHECK18-NEXT: store i8* null, i8** [[TMP96]], align 8 -// CHECK18-NEXT: [[TMP97:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 8 -// CHECK18-NEXT: [[TMP98:%.*]] = bitcast i8** [[TMP97]] to %struct.TT** -// CHECK18-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP98]], align 8 -// CHECK18-NEXT: [[TMP99:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 8 -// CHECK18-NEXT: [[TMP100:%.*]] = bitcast i8** [[TMP99]] to %struct.TT** -// CHECK18-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP100]], align 8 -// CHECK18-NEXT: [[TMP101:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 8 -// CHECK18-NEXT: store i64 16, i64* [[TMP101]], align 8 -// CHECK18-NEXT: [[TMP102:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 8 -// CHECK18-NEXT: store i8* null, i8** [[TMP102]], align 8 -// CHECK18-NEXT: [[TMP103:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP104:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP105:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP106:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l144.region_id, i32 9, i8** [[TMP103]], i8** [[TMP104]], i64* [[TMP105]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_maptypes.8, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) -// CHECK18-NEXT: [[TMP107:%.*]] = icmp ne i32 [[TMP106]], 0 -// CHECK18-NEXT: br i1 [[TMP107]], label [[OMP_OFFLOAD_FAILED19:%.*]], label [[OMP_OFFLOAD_CONT20:%.*]] +// CHECK18-NEXT: store i64 [[TMP5]], i64* [[TMP82]], align 8 +// CHECK18-NEXT: [[TMP83:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 6 +// CHECK18-NEXT: store i8* null, i8** [[TMP83]], align 8 +// CHECK18-NEXT: [[TMP84:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 7 +// CHECK18-NEXT: [[TMP85:%.*]] = bitcast i8** [[TMP84]] to double** +// CHECK18-NEXT: store double* [[VLA1]], double** [[TMP85]], align 8 +// CHECK18-NEXT: [[TMP86:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 7 +// CHECK18-NEXT: [[TMP87:%.*]] = bitcast i8** [[TMP86]] to double** +// CHECK18-NEXT: 
store double* [[VLA1]], double** [[TMP87]], align 8 +// CHECK18-NEXT: store i64 [[TMP48]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_sizes.8, i32 0, i32 7), align 8 +// CHECK18-NEXT: [[TMP88:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 7 +// CHECK18-NEXT: store i8* null, i8** [[TMP88]], align 8 +// CHECK18-NEXT: [[TMP89:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 8 +// CHECK18-NEXT: [[TMP90:%.*]] = bitcast i8** [[TMP89]] to %struct.TT** +// CHECK18-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP90]], align 8 +// CHECK18-NEXT: [[TMP91:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 8 +// CHECK18-NEXT: [[TMP92:%.*]] = bitcast i8** [[TMP91]] to %struct.TT** +// CHECK18-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP92]], align 8 +// CHECK18-NEXT: [[TMP93:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS18]], i64 0, i64 8 +// CHECK18-NEXT: store i8* null, i8** [[TMP93]], align 8 +// CHECK18-NEXT: [[TMP94:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS16]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP95:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS17]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP96:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l144.region_id, i32 9, i8** [[TMP94]], i8** [[TMP95]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) +// CHECK18-NEXT: [[TMP97:%.*]] = icmp ne i32 [[TMP96]], 0 +// CHECK18-NEXT: br i1 [[TMP97]], label [[OMP_OFFLOAD_FAILED19:%.*]], label [[OMP_OFFLOAD_CONT20:%.*]] // CHECK18: omp_offload.failed19: // CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l144(i64 [[TMP44]], [10 x float]* [[B]], i64 [[TMP2]], float* [[VLA]], [5 x [10 x double]]* [[C]], i64 5, i64 [[TMP5]], double* [[VLA1]], %struct.TT* [[D]]) #[[ATTR4]] // CHECK18-NEXT: br label [[OMP_OFFLOAD_CONT20]] @@ -7250,10 +7087,10 @@ // CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l144(i64 [[TMP44]], [10 x float]* [[B]], i64 [[TMP2]], float* [[VLA]], [5 x [10 x double]]* [[C]], i64 5, i64 [[TMP5]], double* [[VLA1]], %struct.TT* [[D]]) #[[ATTR4]] // CHECK18-NEXT: br label [[OMP_IF_END22]] // CHECK18: omp_if.end22: -// CHECK18-NEXT: [[TMP108:%.*]] = load i32, i32* [[A]], align 4 -// CHECK18-NEXT: [[TMP109:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK18-NEXT: call void @llvm.stackrestore(i8* [[TMP109]]) -// CHECK18-NEXT: ret i32 [[TMP108]] +// CHECK18-NEXT: [[TMP98:%.*]] = load i32, i32* [[A]], align 4 +// CHECK18-NEXT: [[TMP99:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK18-NEXT: call void @llvm.stackrestore(i8* [[TMP99]]) +// CHECK18-NEXT: ret i32 [[TMP98]] // // // CHECK18-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l100 @@ -7591,7 +7428,6 @@ // CHECK18-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 8 // CHECK18-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 8 // CHECK18-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 8 -// CHECK18-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 8 // CHECK18-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 // CHECK18-NEXT: store i32 [[N]], i32* 
[[N_ADDR]], align 4 // CHECK18-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8 @@ -7622,56 +7458,46 @@ // CHECK18-NEXT: [[TMP12:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK18-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to double** // CHECK18-NEXT: store double* [[A]], double** [[TMP13]], align 8 -// CHECK18-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK18-NEXT: store i64 8, i64* [[TMP14]], align 8 -// CHECK18-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK18-NEXT: store i8* null, i8** [[TMP15]], align 8 -// CHECK18-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK18-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i64* -// CHECK18-NEXT: store i64 [[TMP6]], i64* [[TMP17]], align 8 -// CHECK18-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK18-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i64* -// CHECK18-NEXT: store i64 [[TMP6]], i64* [[TMP19]], align 8 -// CHECK18-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK18-NEXT: store i64 4, i64* [[TMP20]], align 8 -// CHECK18-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK18-NEXT: store i8* null, i8** [[TMP21]], align 8 -// CHECK18-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK18-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK18-NEXT: store i8* null, i8** [[TMP14]], align 8 +// CHECK18-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK18-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i64* +// CHECK18-NEXT: store i64 [[TMP6]], i64* [[TMP16]], align 8 +// CHECK18-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK18-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i64* +// CHECK18-NEXT: store i64 [[TMP6]], i64* [[TMP18]], align 8 +// CHECK18-NEXT: [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK18-NEXT: store i8* null, i8** [[TMP19]], align 8 +// CHECK18-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK18-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i64* +// CHECK18-NEXT: store i64 2, i64* [[TMP21]], align 8 +// CHECK18-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK18-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i64* // CHECK18-NEXT: store i64 2, i64* [[TMP23]], align 8 -// CHECK18-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK18-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i64* -// CHECK18-NEXT: store i64 2, i64* [[TMP25]], align 8 -// CHECK18-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK18-NEXT: store i64 8, i64* [[TMP26]], align 8 -// CHECK18-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK18-NEXT: store i8* null, i8** [[TMP27]], align 8 -// 
CHECK18-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK18-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i64* -// CHECK18-NEXT: store i64 [[TMP2]], i64* [[TMP29]], align 8 -// CHECK18-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK18-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i64* -// CHECK18-NEXT: store i64 [[TMP2]], i64* [[TMP31]], align 8 -// CHECK18-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK18-NEXT: store i64 8, i64* [[TMP32]], align 8 -// CHECK18-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 -// CHECK18-NEXT: store i8* null, i8** [[TMP33]], align 8 -// CHECK18-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK18-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i16** -// CHECK18-NEXT: store i16* [[VLA]], i16** [[TMP35]], align 8 -// CHECK18-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK18-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i16** -// CHECK18-NEXT: store i16* [[VLA]], i16** [[TMP37]], align 8 -// CHECK18-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK18-NEXT: store i64 [[TMP9]], i64* [[TMP38]], align 8 -// CHECK18-NEXT: [[TMP39:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 -// CHECK18-NEXT: store i8* null, i8** [[TMP39]], align 8 -// CHECK18-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216.region_id, i32 5, i8** [[TMP40]], i8** [[TMP41]], i64* [[TMP42]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) -// CHECK18-NEXT: [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0 -// CHECK18-NEXT: br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK18-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK18-NEXT: store i8* null, i8** [[TMP24]], align 8 +// CHECK18-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK18-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i64* +// CHECK18-NEXT: store i64 [[TMP2]], i64* [[TMP26]], align 8 +// CHECK18-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK18-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i64* +// CHECK18-NEXT: store i64 [[TMP2]], i64* [[TMP28]], align 8 +// CHECK18-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 +// CHECK18-NEXT: store i8* null, i8** [[TMP29]], align 8 +// CHECK18-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK18-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i16** +// 
CHECK18-NEXT: store i16* [[VLA]], i16** [[TMP31]], align 8 +// CHECK18-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK18-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i16** +// CHECK18-NEXT: store i16* [[VLA]], i16** [[TMP33]], align 8 +// CHECK18-NEXT: store i64 [[TMP9]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.11, i32 0, i32 4), align 8 +// CHECK18-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 +// CHECK18-NEXT: store i8* null, i8** [[TMP34]], align 8 +// CHECK18-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP37:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216.region_id, i32 5, i8** [[TMP35]], i8** [[TMP36]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.11, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) +// CHECK18-NEXT: [[TMP38:%.*]] = icmp ne i32 [[TMP37]], 0 +// CHECK18-NEXT: br i1 [[TMP38]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK18: omp_offload.failed: // CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216(%struct.S1* [[THIS1]], i64 [[TMP6]], i64 2, i64 [[TMP2]], i16* [[VLA]]) #[[ATTR4]] // CHECK18-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -7681,15 +7507,15 @@ // CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216(%struct.S1* [[THIS1]], i64 [[TMP6]], i64 2, i64 [[TMP2]], i16* [[VLA]]) #[[ATTR4]] // CHECK18-NEXT: br label [[OMP_IF_END]] // CHECK18: omp_if.end: -// CHECK18-NEXT: [[TMP45:%.*]] = mul nsw i64 1, [[TMP2]] -// CHECK18-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP45]] +// CHECK18-NEXT: [[TMP39:%.*]] = mul nsw i64 1, [[TMP2]] +// CHECK18-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP39]] // CHECK18-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1 -// CHECK18-NEXT: [[TMP46:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2 -// CHECK18-NEXT: [[CONV3:%.*]] = sext i16 [[TMP46]] to i32 -// CHECK18-NEXT: [[TMP47:%.*]] = load i32, i32* [[B]], align 4 -// CHECK18-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], [[TMP47]] -// CHECK18-NEXT: [[TMP48:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK18-NEXT: call void @llvm.stackrestore(i8* [[TMP48]]) +// CHECK18-NEXT: [[TMP40:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2 +// CHECK18-NEXT: [[CONV3:%.*]] = sext i16 [[TMP40]] to i32 +// CHECK18-NEXT: [[TMP41:%.*]] = load i32, i32* [[B]], align 4 +// CHECK18-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], [[TMP41]] +// CHECK18-NEXT: [[TMP42:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK18-NEXT: call void @llvm.stackrestore(i8* [[TMP42]]) // CHECK18-NEXT: ret i32 [[ADD4]] // // @@ -7761,7 +7587,7 @@ // CHECK18-NEXT: store i8* null, i8** [[TMP26]], align 8 // CHECK18-NEXT: [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK18-NEXT: [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP29:%.*]] = call i32 
@__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l198.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) +// CHECK18-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l198.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.14, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.15, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) // CHECK18-NEXT: [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0 // CHECK18-NEXT: br i1 [[TMP30]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK18: omp_offload.failed: @@ -7830,7 +7656,7 @@ // CHECK18-NEXT: store i8* null, i8** [[TMP19]], align 8 // CHECK18-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK18-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l181.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.15, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) +// CHECK18-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l181.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.17, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.18, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) // CHECK18-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0 // CHECK18-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK18: omp_offload.failed: @@ -7869,11 +7695,11 @@ // CHECK18-NEXT: [[CONV3:%.*]] = bitcast i64* [[B_CASTED]] to i32* // CHECK18-NEXT: store i32 [[TMP4]], i32* [[CONV3]], align 4 // CHECK18-NEXT: [[TMP5:%.*]] = load i64, i64* [[B_CASTED]], align 8 -// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], i16* [[TMP3]]) +// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], i16* [[TMP3]]) // CHECK18-NEXT: ret void // // -// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..9 +// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..10 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i64 noundef [[B:%.*]], i64 noundef [[VLA:%.*]], i64 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR3]] { // CHECK18-NEXT: entry: // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -7942,11 +7768,11 @@ // CHECK18-NEXT: [[CONV5:%.*]] = bitcast i64* [[AAA_CASTED]] to i8* // CHECK18-NEXT: store i8 [[TMP5]], i8* [[CONV5]], align 1 // CHECK18-NEXT: [[TMP6:%.*]] = load i64, i64* [[AAA_CASTED]], align 8 -// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, [10 x i32]*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], [10 x i32]* [[TMP0]]) +// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, [10 x i32]*)* @.omp_outlined..13 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], [10 x i32]* [[TMP0]]) // CHECK18-NEXT: ret void // // -// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..13 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], i64 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] { // CHECK18-NEXT: entry: // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -8007,11 +7833,11 @@ // CHECK18-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16* // CHECK18-NEXT: store i16 [[TMP3]], i16* [[CONV3]], align 2 // CHECK18-NEXT: [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8 -// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]]) +// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]]) // CHECK18-NEXT: ret void // // -// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..14 +// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..16 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] { // CHECK18-NEXT: entry: // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -8076,7 +7902,6 @@ // CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS13:%.*]] = alloca [9 x i8*], align 4 // CHECK19-NEXT: [[DOTOFFLOAD_PTRS14:%.*]] = alloca [9 x i8*], align 4 // CHECK19-NEXT: [[DOTOFFLOAD_MAPPERS15:%.*]] = alloca [9 x i8*], align 4 -// CHECK19-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [9 x i64], align 4 // CHECK19-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1:[0-9]+]]) // CHECK19-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 // CHECK19-NEXT: store i32 0, i32* [[A]], align 4 @@ -8178,96 +8003,79 @@ // CHECK19-NEXT: [[TMP51:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 0 // CHECK19-NEXT: [[TMP52:%.*]] = bitcast i8** [[TMP51]] to i32* // CHECK19-NEXT: store i32 [[TMP42]], i32* [[TMP52]], align 4 -// CHECK19-NEXT: [[TMP53:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK19-NEXT: store i64 4, i64* [[TMP53]], align 4 -// CHECK19-NEXT: [[TMP54:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 0 -// CHECK19-NEXT: store i8* null, i8** [[TMP54]], align 4 -// CHECK19-NEXT: [[TMP55:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 1 -// CHECK19-NEXT: [[TMP56:%.*]] = bitcast i8** [[TMP55]] to [10 x float]** -// CHECK19-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP56]], align 4 -// CHECK19-NEXT: [[TMP57:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 1 -// CHECK19-NEXT: [[TMP58:%.*]] = bitcast i8** [[TMP57]] to [10 x float]** -// CHECK19-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP58]], align 4 -// CHECK19-NEXT: [[TMP59:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK19-NEXT: store i64 40, i64* [[TMP59]], align 4 -// CHECK19-NEXT: [[TMP60:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 1 -// CHECK19-NEXT: store i8* null, i8** [[TMP60]], align 4 -// CHECK19-NEXT: [[TMP61:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 2 +// CHECK19-NEXT: [[TMP53:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 0 +// CHECK19-NEXT: store i8* null, i8** [[TMP53]], align 4 +// CHECK19-NEXT: [[TMP54:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 1 +// CHECK19-NEXT: [[TMP55:%.*]] = bitcast i8** [[TMP54]] to [10 x float]** +// CHECK19-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP55]], align 4 +// CHECK19-NEXT: [[TMP56:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 1 +// CHECK19-NEXT: [[TMP57:%.*]] = bitcast i8** [[TMP56]] to [10 x float]** +// CHECK19-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP57]], align 4 
+// CHECK19-NEXT: [[TMP58:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 1 +// CHECK19-NEXT: store i8* null, i8** [[TMP58]], align 4 +// CHECK19-NEXT: [[TMP59:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 2 +// CHECK19-NEXT: [[TMP60:%.*]] = bitcast i8** [[TMP59]] to i32* +// CHECK19-NEXT: store i32 [[TMP1]], i32* [[TMP60]], align 4 +// CHECK19-NEXT: [[TMP61:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 2 // CHECK19-NEXT: [[TMP62:%.*]] = bitcast i8** [[TMP61]] to i32* // CHECK19-NEXT: store i32 [[TMP1]], i32* [[TMP62]], align 4 -// CHECK19-NEXT: [[TMP63:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 2 -// CHECK19-NEXT: [[TMP64:%.*]] = bitcast i8** [[TMP63]] to i32* -// CHECK19-NEXT: store i32 [[TMP1]], i32* [[TMP64]], align 4 -// CHECK19-NEXT: [[TMP65:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK19-NEXT: store i64 4, i64* [[TMP65]], align 4 -// CHECK19-NEXT: [[TMP66:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 2 -// CHECK19-NEXT: store i8* null, i8** [[TMP66]], align 4 -// CHECK19-NEXT: [[TMP67:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 3 -// CHECK19-NEXT: [[TMP68:%.*]] = bitcast i8** [[TMP67]] to float** -// CHECK19-NEXT: store float* [[VLA]], float** [[TMP68]], align 4 -// CHECK19-NEXT: [[TMP69:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 3 -// CHECK19-NEXT: [[TMP70:%.*]] = bitcast i8** [[TMP69]] to float** -// CHECK19-NEXT: store float* [[VLA]], float** [[TMP70]], align 4 -// CHECK19-NEXT: [[TMP71:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK19-NEXT: store i64 [[TMP45]], i64* [[TMP71]], align 4 -// CHECK19-NEXT: [[TMP72:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 3 -// CHECK19-NEXT: store i8* null, i8** [[TMP72]], align 4 -// CHECK19-NEXT: [[TMP73:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 4 -// CHECK19-NEXT: [[TMP74:%.*]] = bitcast i8** [[TMP73]] to [5 x [10 x double]]** -// CHECK19-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP74]], align 4 -// CHECK19-NEXT: [[TMP75:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 4 -// CHECK19-NEXT: [[TMP76:%.*]] = bitcast i8** [[TMP75]] to [5 x [10 x double]]** -// CHECK19-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP76]], align 4 -// CHECK19-NEXT: [[TMP77:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK19-NEXT: store i64 400, i64* [[TMP77]], align 4 -// CHECK19-NEXT: [[TMP78:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 4 +// CHECK19-NEXT: [[TMP63:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 2 +// CHECK19-NEXT: store i8* null, i8** [[TMP63]], align 4 +// CHECK19-NEXT: [[TMP64:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 3 +// CHECK19-NEXT: [[TMP65:%.*]] = bitcast i8** [[TMP64]] to float** +// CHECK19-NEXT: store float* [[VLA]], float** [[TMP65]], align 4 +// CHECK19-NEXT: [[TMP66:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 3 +// CHECK19-NEXT: 
[[TMP67:%.*]] = bitcast i8** [[TMP66]] to float** +// CHECK19-NEXT: store float* [[VLA]], float** [[TMP67]], align 4 +// CHECK19-NEXT: store i64 [[TMP45]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_sizes.8, i32 0, i32 3), align 4 +// CHECK19-NEXT: [[TMP68:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 3 +// CHECK19-NEXT: store i8* null, i8** [[TMP68]], align 4 +// CHECK19-NEXT: [[TMP69:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 4 +// CHECK19-NEXT: [[TMP70:%.*]] = bitcast i8** [[TMP69]] to [5 x [10 x double]]** +// CHECK19-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP70]], align 4 +// CHECK19-NEXT: [[TMP71:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 4 +// CHECK19-NEXT: [[TMP72:%.*]] = bitcast i8** [[TMP71]] to [5 x [10 x double]]** +// CHECK19-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP72]], align 4 +// CHECK19-NEXT: [[TMP73:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 4 +// CHECK19-NEXT: store i8* null, i8** [[TMP73]], align 4 +// CHECK19-NEXT: [[TMP74:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 5 +// CHECK19-NEXT: [[TMP75:%.*]] = bitcast i8** [[TMP74]] to i32* +// CHECK19-NEXT: store i32 5, i32* [[TMP75]], align 4 +// CHECK19-NEXT: [[TMP76:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 5 +// CHECK19-NEXT: [[TMP77:%.*]] = bitcast i8** [[TMP76]] to i32* +// CHECK19-NEXT: store i32 5, i32* [[TMP77]], align 4 +// CHECK19-NEXT: [[TMP78:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 5 // CHECK19-NEXT: store i8* null, i8** [[TMP78]], align 4 -// CHECK19-NEXT: [[TMP79:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 5 +// CHECK19-NEXT: [[TMP79:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 6 // CHECK19-NEXT: [[TMP80:%.*]] = bitcast i8** [[TMP79]] to i32* -// CHECK19-NEXT: store i32 5, i32* [[TMP80]], align 4 -// CHECK19-NEXT: [[TMP81:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 5 +// CHECK19-NEXT: store i32 [[TMP3]], i32* [[TMP80]], align 4 +// CHECK19-NEXT: [[TMP81:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 6 // CHECK19-NEXT: [[TMP82:%.*]] = bitcast i8** [[TMP81]] to i32* -// CHECK19-NEXT: store i32 5, i32* [[TMP82]], align 4 -// CHECK19-NEXT: [[TMP83:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5 -// CHECK19-NEXT: store i64 4, i64* [[TMP83]], align 4 -// CHECK19-NEXT: [[TMP84:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 5 -// CHECK19-NEXT: store i8* null, i8** [[TMP84]], align 4 -// CHECK19-NEXT: [[TMP85:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 6 -// CHECK19-NEXT: [[TMP86:%.*]] = bitcast i8** [[TMP85]] to i32* -// CHECK19-NEXT: store i32 [[TMP3]], i32* [[TMP86]], align 4 -// CHECK19-NEXT: [[TMP87:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 6 -// CHECK19-NEXT: [[TMP88:%.*]] = bitcast i8** [[TMP87]] to i32* -// CHECK19-NEXT: store i32 [[TMP3]], i32* [[TMP88]], align 4 -// CHECK19-NEXT: [[TMP89:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6 -// 
CHECK19-NEXT: store i64 4, i64* [[TMP89]], align 4 -// CHECK19-NEXT: [[TMP90:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 6 -// CHECK19-NEXT: store i8* null, i8** [[TMP90]], align 4 -// CHECK19-NEXT: [[TMP91:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 7 -// CHECK19-NEXT: [[TMP92:%.*]] = bitcast i8** [[TMP91]] to double** -// CHECK19-NEXT: store double* [[VLA1]], double** [[TMP92]], align 4 -// CHECK19-NEXT: [[TMP93:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 7 -// CHECK19-NEXT: [[TMP94:%.*]] = bitcast i8** [[TMP93]] to double** -// CHECK19-NEXT: store double* [[VLA1]], double** [[TMP94]], align 4 -// CHECK19-NEXT: [[TMP95:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7 -// CHECK19-NEXT: store i64 [[TMP48]], i64* [[TMP95]], align 4 -// CHECK19-NEXT: [[TMP96:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 7 -// CHECK19-NEXT: store i8* null, i8** [[TMP96]], align 4 -// CHECK19-NEXT: [[TMP97:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 8 -// CHECK19-NEXT: [[TMP98:%.*]] = bitcast i8** [[TMP97]] to %struct.TT** -// CHECK19-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP98]], align 4 -// CHECK19-NEXT: [[TMP99:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 8 -// CHECK19-NEXT: [[TMP100:%.*]] = bitcast i8** [[TMP99]] to %struct.TT** -// CHECK19-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP100]], align 4 -// CHECK19-NEXT: [[TMP101:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 8 -// CHECK19-NEXT: store i64 12, i64* [[TMP101]], align 4 -// CHECK19-NEXT: [[TMP102:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 8 -// CHECK19-NEXT: store i8* null, i8** [[TMP102]], align 4 -// CHECK19-NEXT: [[TMP103:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP104:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP105:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP106:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l144.region_id, i32 9, i8** [[TMP103]], i8** [[TMP104]], i64* [[TMP105]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_maptypes.8, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) -// CHECK19-NEXT: [[TMP107:%.*]] = icmp ne i32 [[TMP106]], 0 -// CHECK19-NEXT: br i1 [[TMP107]], label [[OMP_OFFLOAD_FAILED16:%.*]], label [[OMP_OFFLOAD_CONT17:%.*]] +// CHECK19-NEXT: store i32 [[TMP3]], i32* [[TMP82]], align 4 +// CHECK19-NEXT: [[TMP83:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 6 +// CHECK19-NEXT: store i8* null, i8** [[TMP83]], align 4 +// CHECK19-NEXT: [[TMP84:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 7 +// CHECK19-NEXT: [[TMP85:%.*]] = bitcast i8** [[TMP84]] to double** +// CHECK19-NEXT: store double* [[VLA1]], double** [[TMP85]], align 4 +// CHECK19-NEXT: [[TMP86:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 7 +// CHECK19-NEXT: [[TMP87:%.*]] = bitcast i8** [[TMP86]] to double** +// CHECK19-NEXT: 
store double* [[VLA1]], double** [[TMP87]], align 4 +// CHECK19-NEXT: store i64 [[TMP48]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_sizes.8, i32 0, i32 7), align 4 +// CHECK19-NEXT: [[TMP88:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 7 +// CHECK19-NEXT: store i8* null, i8** [[TMP88]], align 4 +// CHECK19-NEXT: [[TMP89:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 8 +// CHECK19-NEXT: [[TMP90:%.*]] = bitcast i8** [[TMP89]] to %struct.TT** +// CHECK19-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP90]], align 4 +// CHECK19-NEXT: [[TMP91:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 8 +// CHECK19-NEXT: [[TMP92:%.*]] = bitcast i8** [[TMP91]] to %struct.TT** +// CHECK19-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP92]], align 4 +// CHECK19-NEXT: [[TMP93:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 8 +// CHECK19-NEXT: store i8* null, i8** [[TMP93]], align 4 +// CHECK19-NEXT: [[TMP94:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP95:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP96:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l144.region_id, i32 9, i8** [[TMP94]], i8** [[TMP95]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) +// CHECK19-NEXT: [[TMP97:%.*]] = icmp ne i32 [[TMP96]], 0 +// CHECK19-NEXT: br i1 [[TMP97]], label [[OMP_OFFLOAD_FAILED16:%.*]], label [[OMP_OFFLOAD_CONT17:%.*]] // CHECK19: omp_offload.failed16: // CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l144(i32 [[TMP42]], [10 x float]* [[B]], i32 [[TMP1]], float* [[VLA]], [5 x [10 x double]]* [[C]], i32 5, i32 [[TMP3]], double* [[VLA1]], %struct.TT* [[D]]) #[[ATTR4]] // CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT17]] @@ -8277,10 +8085,10 @@ // CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l144(i32 [[TMP42]], [10 x float]* [[B]], i32 [[TMP1]], float* [[VLA]], [5 x [10 x double]]* [[C]], i32 5, i32 [[TMP3]], double* [[VLA1]], %struct.TT* [[D]]) #[[ATTR4]] // CHECK19-NEXT: br label [[OMP_IF_END19]] // CHECK19: omp_if.end19: -// CHECK19-NEXT: [[TMP108:%.*]] = load i32, i32* [[A]], align 4 -// CHECK19-NEXT: [[TMP109:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK19-NEXT: call void @llvm.stackrestore(i8* [[TMP109]]) -// CHECK19-NEXT: ret i32 [[TMP108]] +// CHECK19-NEXT: [[TMP98:%.*]] = load i32, i32* [[A]], align 4 +// CHECK19-NEXT: [[TMP99:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK19-NEXT: call void @llvm.stackrestore(i8* [[TMP99]]) +// CHECK19-NEXT: ret i32 [[TMP98]] // // // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l100 @@ -8609,7 +8417,6 @@ // CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 4 // CHECK19-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 4 // CHECK19-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 4 -// CHECK19-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 4 // CHECK19-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 // CHECK19-NEXT: store i32 [[N]], i32* 
[[N_ADDR]], align 4 // CHECK19-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 @@ -8639,56 +8446,46 @@ // CHECK19-NEXT: [[TMP12:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK19-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to double** // CHECK19-NEXT: store double* [[A]], double** [[TMP13]], align 4 -// CHECK19-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK19-NEXT: store i64 8, i64* [[TMP14]], align 4 -// CHECK19-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK19-NEXT: store i8* null, i8** [[TMP15]], align 4 -// CHECK19-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK19-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32* -// CHECK19-NEXT: store i32 [[TMP5]], i32* [[TMP17]], align 4 -// CHECK19-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK19-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32* -// CHECK19-NEXT: store i32 [[TMP5]], i32* [[TMP19]], align 4 -// CHECK19-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK19-NEXT: store i64 4, i64* [[TMP20]], align 4 -// CHECK19-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK19-NEXT: store i8* null, i8** [[TMP21]], align 4 -// CHECK19-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK19-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK19-NEXT: store i8* null, i8** [[TMP14]], align 4 +// CHECK19-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK19-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i32* +// CHECK19-NEXT: store i32 [[TMP5]], i32* [[TMP16]], align 4 +// CHECK19-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK19-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32* +// CHECK19-NEXT: store i32 [[TMP5]], i32* [[TMP18]], align 4 +// CHECK19-NEXT: [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK19-NEXT: store i8* null, i8** [[TMP19]], align 4 +// CHECK19-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK19-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32* +// CHECK19-NEXT: store i32 2, i32* [[TMP21]], align 4 +// CHECK19-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK19-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i32* // CHECK19-NEXT: store i32 2, i32* [[TMP23]], align 4 -// CHECK19-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK19-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i32* -// CHECK19-NEXT: store i32 2, i32* [[TMP25]], align 4 -// CHECK19-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK19-NEXT: store i64 4, i64* [[TMP26]], align 4 -// CHECK19-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK19-NEXT: store i8* null, i8** [[TMP27]], align 4 -// 
CHECK19-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK19-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i32* -// CHECK19-NEXT: store i32 [[TMP1]], i32* [[TMP29]], align 4 -// CHECK19-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK19-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i32* -// CHECK19-NEXT: store i32 [[TMP1]], i32* [[TMP31]], align 4 -// CHECK19-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK19-NEXT: store i64 4, i64* [[TMP32]], align 4 -// CHECK19-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 -// CHECK19-NEXT: store i8* null, i8** [[TMP33]], align 4 -// CHECK19-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK19-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i16** -// CHECK19-NEXT: store i16* [[VLA]], i16** [[TMP35]], align 4 -// CHECK19-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK19-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i16** -// CHECK19-NEXT: store i16* [[VLA]], i16** [[TMP37]], align 4 -// CHECK19-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK19-NEXT: store i64 [[TMP9]], i64* [[TMP38]], align 4 -// CHECK19-NEXT: [[TMP39:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 -// CHECK19-NEXT: store i8* null, i8** [[TMP39]], align 4 -// CHECK19-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216.region_id, i32 5, i8** [[TMP40]], i8** [[TMP41]], i64* [[TMP42]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) -// CHECK19-NEXT: [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0 -// CHECK19-NEXT: br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK19-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK19-NEXT: store i8* null, i8** [[TMP24]], align 4 +// CHECK19-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK19-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i32* +// CHECK19-NEXT: store i32 [[TMP1]], i32* [[TMP26]], align 4 +// CHECK19-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK19-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i32* +// CHECK19-NEXT: store i32 [[TMP1]], i32* [[TMP28]], align 4 +// CHECK19-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 +// CHECK19-NEXT: store i8* null, i8** [[TMP29]], align 4 +// CHECK19-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK19-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i16** +// 
CHECK19-NEXT: store i16* [[VLA]], i16** [[TMP31]], align 4 +// CHECK19-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK19-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i16** +// CHECK19-NEXT: store i16* [[VLA]], i16** [[TMP33]], align 4 +// CHECK19-NEXT: store i64 [[TMP9]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.11, i32 0, i32 4), align 4 +// CHECK19-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 +// CHECK19-NEXT: store i8* null, i8** [[TMP34]], align 4 +// CHECK19-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP37:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216.region_id, i32 5, i8** [[TMP35]], i8** [[TMP36]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.11, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) +// CHECK19-NEXT: [[TMP38:%.*]] = icmp ne i32 [[TMP37]], 0 +// CHECK19-NEXT: br i1 [[TMP38]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK19: omp_offload.failed: // CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216(%struct.S1* [[THIS1]], i32 [[TMP5]], i32 2, i32 [[TMP1]], i16* [[VLA]]) #[[ATTR4]] // CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -8698,15 +8495,15 @@ // CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216(%struct.S1* [[THIS1]], i32 [[TMP5]], i32 2, i32 [[TMP1]], i16* [[VLA]]) #[[ATTR4]] // CHECK19-NEXT: br label [[OMP_IF_END]] // CHECK19: omp_if.end: -// CHECK19-NEXT: [[TMP45:%.*]] = mul nsw i32 1, [[TMP1]] -// CHECK19-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP45]] +// CHECK19-NEXT: [[TMP39:%.*]] = mul nsw i32 1, [[TMP1]] +// CHECK19-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP39]] // CHECK19-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1 -// CHECK19-NEXT: [[TMP46:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2 -// CHECK19-NEXT: [[CONV:%.*]] = sext i16 [[TMP46]] to i32 -// CHECK19-NEXT: [[TMP47:%.*]] = load i32, i32* [[B]], align 4 -// CHECK19-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[TMP47]] -// CHECK19-NEXT: [[TMP48:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK19-NEXT: call void @llvm.stackrestore(i8* [[TMP48]]) +// CHECK19-NEXT: [[TMP40:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2 +// CHECK19-NEXT: [[CONV:%.*]] = sext i16 [[TMP40]] to i32 +// CHECK19-NEXT: [[TMP41:%.*]] = load i32, i32* [[B]], align 4 +// CHECK19-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[TMP41]] +// CHECK19-NEXT: [[TMP42:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK19-NEXT: call void @llvm.stackrestore(i8* [[TMP42]]) // CHECK19-NEXT: ret i32 [[ADD3]] // // @@ -8777,7 +8574,7 @@ // CHECK19-NEXT: store i8* null, i8** [[TMP26]], align 4 // CHECK19-NEXT: [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK19-NEXT: [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* 
@[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l198.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) +// CHECK19-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l198.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.14, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.15, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) // CHECK19-NEXT: [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0 // CHECK19-NEXT: br i1 [[TMP30]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK19: omp_offload.failed: @@ -8845,7 +8642,7 @@ // CHECK19-NEXT: store i8* null, i8** [[TMP19]], align 4 // CHECK19-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK19-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l181.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.15, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) +// CHECK19-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l181.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.17, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.18, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) // CHECK19-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0 // CHECK19-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK19: omp_offload.failed: @@ -8882,11 +8679,11 @@ // CHECK19-NEXT: [[TMP4:%.*]] = load i32, i32* [[B_ADDR]], align 4 // CHECK19-NEXT: store i32 [[TMP4]], i32* [[B_CASTED]], align 4 // CHECK19-NEXT: [[TMP5:%.*]] = load i32, i32* [[B_CASTED]], align 4 -// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], i16* [[TMP3]]) +// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], i16* [[TMP3]]) // CHECK19-NEXT: ret void // // -// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..9 +// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..10 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i32 noundef [[B:%.*]], i32 noundef [[VLA:%.*]], i32 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR3]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -8952,11 +8749,11 @@ // CHECK19-NEXT: [[CONV3:%.*]] = bitcast i32* [[AAA_CASTED]] to i8* // CHECK19-NEXT: store i8 [[TMP5]], i8* [[CONV3]], align 1 // CHECK19-NEXT: [[TMP6:%.*]] = load i32, i32* [[AAA_CASTED]], align 4 -// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, [10 x i32]*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], [10 x i32]* [[TMP0]]) +// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, [10 x i32]*)* @.omp_outlined..13 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], [10 x i32]* [[TMP0]]) // CHECK19-NEXT: ret void // // -// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..13 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], i32 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -9014,11 +8811,11 @@ // CHECK19-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16* // CHECK19-NEXT: store i16 [[TMP3]], i16* [[CONV1]], align 2 // CHECK19-NEXT: [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4 -// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]]) +// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]]) // CHECK19-NEXT: ret void // // -// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..14 +// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..16 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -9082,7 +8879,6 @@ // CHECK20-NEXT: [[DOTOFFLOAD_BASEPTRS13:%.*]] = alloca [9 x i8*], align 4 // CHECK20-NEXT: [[DOTOFFLOAD_PTRS14:%.*]] = alloca [9 x i8*], align 4 // CHECK20-NEXT: [[DOTOFFLOAD_MAPPERS15:%.*]] = alloca [9 x i8*], align 4 -// CHECK20-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [9 x i64], align 4 // CHECK20-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1:[0-9]+]]) // CHECK20-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 // CHECK20-NEXT: store i32 0, i32* [[A]], align 4 @@ -9184,96 +8980,79 @@ // CHECK20-NEXT: [[TMP51:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 0 // CHECK20-NEXT: [[TMP52:%.*]] = bitcast i8** [[TMP51]] to i32* // CHECK20-NEXT: store i32 [[TMP42]], i32* [[TMP52]], align 4 -// CHECK20-NEXT: [[TMP53:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK20-NEXT: store i64 4, i64* [[TMP53]], align 4 -// CHECK20-NEXT: [[TMP54:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 0 -// CHECK20-NEXT: store i8* null, i8** [[TMP54]], align 4 -// CHECK20-NEXT: [[TMP55:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 1 -// CHECK20-NEXT: [[TMP56:%.*]] = bitcast i8** [[TMP55]] to [10 x float]** -// CHECK20-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP56]], align 4 -// CHECK20-NEXT: [[TMP57:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 1 -// CHECK20-NEXT: [[TMP58:%.*]] = bitcast i8** [[TMP57]] to [10 x float]** -// CHECK20-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP58]], align 4 -// CHECK20-NEXT: [[TMP59:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK20-NEXT: store i64 40, i64* [[TMP59]], align 4 -// CHECK20-NEXT: [[TMP60:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 1 -// CHECK20-NEXT: store i8* null, i8** [[TMP60]], align 4 -// CHECK20-NEXT: [[TMP61:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 2 +// CHECK20-NEXT: [[TMP53:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 0 +// CHECK20-NEXT: store i8* null, i8** [[TMP53]], align 4 +// CHECK20-NEXT: [[TMP54:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 1 +// CHECK20-NEXT: [[TMP55:%.*]] = bitcast i8** [[TMP54]] to [10 x float]** +// CHECK20-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP55]], align 4 +// CHECK20-NEXT: [[TMP56:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 1 +// CHECK20-NEXT: [[TMP57:%.*]] = bitcast i8** [[TMP56]] to [10 x float]** +// CHECK20-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP57]], align 4 
+// CHECK20-NEXT: [[TMP58:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 1 +// CHECK20-NEXT: store i8* null, i8** [[TMP58]], align 4 +// CHECK20-NEXT: [[TMP59:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 2 +// CHECK20-NEXT: [[TMP60:%.*]] = bitcast i8** [[TMP59]] to i32* +// CHECK20-NEXT: store i32 [[TMP1]], i32* [[TMP60]], align 4 +// CHECK20-NEXT: [[TMP61:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 2 // CHECK20-NEXT: [[TMP62:%.*]] = bitcast i8** [[TMP61]] to i32* // CHECK20-NEXT: store i32 [[TMP1]], i32* [[TMP62]], align 4 -// CHECK20-NEXT: [[TMP63:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 2 -// CHECK20-NEXT: [[TMP64:%.*]] = bitcast i8** [[TMP63]] to i32* -// CHECK20-NEXT: store i32 [[TMP1]], i32* [[TMP64]], align 4 -// CHECK20-NEXT: [[TMP65:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK20-NEXT: store i64 4, i64* [[TMP65]], align 4 -// CHECK20-NEXT: [[TMP66:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 2 -// CHECK20-NEXT: store i8* null, i8** [[TMP66]], align 4 -// CHECK20-NEXT: [[TMP67:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 3 -// CHECK20-NEXT: [[TMP68:%.*]] = bitcast i8** [[TMP67]] to float** -// CHECK20-NEXT: store float* [[VLA]], float** [[TMP68]], align 4 -// CHECK20-NEXT: [[TMP69:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 3 -// CHECK20-NEXT: [[TMP70:%.*]] = bitcast i8** [[TMP69]] to float** -// CHECK20-NEXT: store float* [[VLA]], float** [[TMP70]], align 4 -// CHECK20-NEXT: [[TMP71:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK20-NEXT: store i64 [[TMP45]], i64* [[TMP71]], align 4 -// CHECK20-NEXT: [[TMP72:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 3 -// CHECK20-NEXT: store i8* null, i8** [[TMP72]], align 4 -// CHECK20-NEXT: [[TMP73:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 4 -// CHECK20-NEXT: [[TMP74:%.*]] = bitcast i8** [[TMP73]] to [5 x [10 x double]]** -// CHECK20-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP74]], align 4 -// CHECK20-NEXT: [[TMP75:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 4 -// CHECK20-NEXT: [[TMP76:%.*]] = bitcast i8** [[TMP75]] to [5 x [10 x double]]** -// CHECK20-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP76]], align 4 -// CHECK20-NEXT: [[TMP77:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK20-NEXT: store i64 400, i64* [[TMP77]], align 4 -// CHECK20-NEXT: [[TMP78:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 4 +// CHECK20-NEXT: [[TMP63:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 2 +// CHECK20-NEXT: store i8* null, i8** [[TMP63]], align 4 +// CHECK20-NEXT: [[TMP64:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 3 +// CHECK20-NEXT: [[TMP65:%.*]] = bitcast i8** [[TMP64]] to float** +// CHECK20-NEXT: store float* [[VLA]], float** [[TMP65]], align 4 +// CHECK20-NEXT: [[TMP66:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 3 +// CHECK20-NEXT: 
[[TMP67:%.*]] = bitcast i8** [[TMP66]] to float** +// CHECK20-NEXT: store float* [[VLA]], float** [[TMP67]], align 4 +// CHECK20-NEXT: store i64 [[TMP45]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_sizes.8, i32 0, i32 3), align 4 +// CHECK20-NEXT: [[TMP68:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 3 +// CHECK20-NEXT: store i8* null, i8** [[TMP68]], align 4 +// CHECK20-NEXT: [[TMP69:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 4 +// CHECK20-NEXT: [[TMP70:%.*]] = bitcast i8** [[TMP69]] to [5 x [10 x double]]** +// CHECK20-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP70]], align 4 +// CHECK20-NEXT: [[TMP71:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 4 +// CHECK20-NEXT: [[TMP72:%.*]] = bitcast i8** [[TMP71]] to [5 x [10 x double]]** +// CHECK20-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP72]], align 4 +// CHECK20-NEXT: [[TMP73:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 4 +// CHECK20-NEXT: store i8* null, i8** [[TMP73]], align 4 +// CHECK20-NEXT: [[TMP74:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 5 +// CHECK20-NEXT: [[TMP75:%.*]] = bitcast i8** [[TMP74]] to i32* +// CHECK20-NEXT: store i32 5, i32* [[TMP75]], align 4 +// CHECK20-NEXT: [[TMP76:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 5 +// CHECK20-NEXT: [[TMP77:%.*]] = bitcast i8** [[TMP76]] to i32* +// CHECK20-NEXT: store i32 5, i32* [[TMP77]], align 4 +// CHECK20-NEXT: [[TMP78:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 5 // CHECK20-NEXT: store i8* null, i8** [[TMP78]], align 4 -// CHECK20-NEXT: [[TMP79:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 5 +// CHECK20-NEXT: [[TMP79:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 6 // CHECK20-NEXT: [[TMP80:%.*]] = bitcast i8** [[TMP79]] to i32* -// CHECK20-NEXT: store i32 5, i32* [[TMP80]], align 4 -// CHECK20-NEXT: [[TMP81:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 5 +// CHECK20-NEXT: store i32 [[TMP3]], i32* [[TMP80]], align 4 +// CHECK20-NEXT: [[TMP81:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 6 // CHECK20-NEXT: [[TMP82:%.*]] = bitcast i8** [[TMP81]] to i32* -// CHECK20-NEXT: store i32 5, i32* [[TMP82]], align 4 -// CHECK20-NEXT: [[TMP83:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5 -// CHECK20-NEXT: store i64 4, i64* [[TMP83]], align 4 -// CHECK20-NEXT: [[TMP84:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 5 -// CHECK20-NEXT: store i8* null, i8** [[TMP84]], align 4 -// CHECK20-NEXT: [[TMP85:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 6 -// CHECK20-NEXT: [[TMP86:%.*]] = bitcast i8** [[TMP85]] to i32* -// CHECK20-NEXT: store i32 [[TMP3]], i32* [[TMP86]], align 4 -// CHECK20-NEXT: [[TMP87:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 6 -// CHECK20-NEXT: [[TMP88:%.*]] = bitcast i8** [[TMP87]] to i32* -// CHECK20-NEXT: store i32 [[TMP3]], i32* [[TMP88]], align 4 -// CHECK20-NEXT: [[TMP89:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6 -// 
CHECK20-NEXT: store i64 4, i64* [[TMP89]], align 4 -// CHECK20-NEXT: [[TMP90:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 6 -// CHECK20-NEXT: store i8* null, i8** [[TMP90]], align 4 -// CHECK20-NEXT: [[TMP91:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 7 -// CHECK20-NEXT: [[TMP92:%.*]] = bitcast i8** [[TMP91]] to double** -// CHECK20-NEXT: store double* [[VLA1]], double** [[TMP92]], align 4 -// CHECK20-NEXT: [[TMP93:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 7 -// CHECK20-NEXT: [[TMP94:%.*]] = bitcast i8** [[TMP93]] to double** -// CHECK20-NEXT: store double* [[VLA1]], double** [[TMP94]], align 4 -// CHECK20-NEXT: [[TMP95:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7 -// CHECK20-NEXT: store i64 [[TMP48]], i64* [[TMP95]], align 4 -// CHECK20-NEXT: [[TMP96:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 7 -// CHECK20-NEXT: store i8* null, i8** [[TMP96]], align 4 -// CHECK20-NEXT: [[TMP97:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 8 -// CHECK20-NEXT: [[TMP98:%.*]] = bitcast i8** [[TMP97]] to %struct.TT** -// CHECK20-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP98]], align 4 -// CHECK20-NEXT: [[TMP99:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 8 -// CHECK20-NEXT: [[TMP100:%.*]] = bitcast i8** [[TMP99]] to %struct.TT** -// CHECK20-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP100]], align 4 -// CHECK20-NEXT: [[TMP101:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 8 -// CHECK20-NEXT: store i64 12, i64* [[TMP101]], align 4 -// CHECK20-NEXT: [[TMP102:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 8 -// CHECK20-NEXT: store i8* null, i8** [[TMP102]], align 4 -// CHECK20-NEXT: [[TMP103:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP104:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP105:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP106:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l144.region_id, i32 9, i8** [[TMP103]], i8** [[TMP104]], i64* [[TMP105]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_maptypes.8, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) -// CHECK20-NEXT: [[TMP107:%.*]] = icmp ne i32 [[TMP106]], 0 -// CHECK20-NEXT: br i1 [[TMP107]], label [[OMP_OFFLOAD_FAILED16:%.*]], label [[OMP_OFFLOAD_CONT17:%.*]] +// CHECK20-NEXT: store i32 [[TMP3]], i32* [[TMP82]], align 4 +// CHECK20-NEXT: [[TMP83:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 6 +// CHECK20-NEXT: store i8* null, i8** [[TMP83]], align 4 +// CHECK20-NEXT: [[TMP84:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 7 +// CHECK20-NEXT: [[TMP85:%.*]] = bitcast i8** [[TMP84]] to double** +// CHECK20-NEXT: store double* [[VLA1]], double** [[TMP85]], align 4 +// CHECK20-NEXT: [[TMP86:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 7 +// CHECK20-NEXT: [[TMP87:%.*]] = bitcast i8** [[TMP86]] to double** +// CHECK20-NEXT: 
store double* [[VLA1]], double** [[TMP87]], align 4 +// CHECK20-NEXT: store i64 [[TMP48]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_sizes.8, i32 0, i32 7), align 4 +// CHECK20-NEXT: [[TMP88:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 7 +// CHECK20-NEXT: store i8* null, i8** [[TMP88]], align 4 +// CHECK20-NEXT: [[TMP89:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 8 +// CHECK20-NEXT: [[TMP90:%.*]] = bitcast i8** [[TMP89]] to %struct.TT** +// CHECK20-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP90]], align 4 +// CHECK20-NEXT: [[TMP91:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 8 +// CHECK20-NEXT: [[TMP92:%.*]] = bitcast i8** [[TMP91]] to %struct.TT** +// CHECK20-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP92]], align 4 +// CHECK20-NEXT: [[TMP93:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS15]], i32 0, i32 8 +// CHECK20-NEXT: store i8* null, i8** [[TMP93]], align 4 +// CHECK20-NEXT: [[TMP94:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP95:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP96:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l144.region_id, i32 9, i8** [[TMP94]], i8** [[TMP95]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) +// CHECK20-NEXT: [[TMP97:%.*]] = icmp ne i32 [[TMP96]], 0 +// CHECK20-NEXT: br i1 [[TMP97]], label [[OMP_OFFLOAD_FAILED16:%.*]], label [[OMP_OFFLOAD_CONT17:%.*]] // CHECK20: omp_offload.failed16: // CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l144(i32 [[TMP42]], [10 x float]* [[B]], i32 [[TMP1]], float* [[VLA]], [5 x [10 x double]]* [[C]], i32 5, i32 [[TMP3]], double* [[VLA1]], %struct.TT* [[D]]) #[[ATTR4]] // CHECK20-NEXT: br label [[OMP_OFFLOAD_CONT17]] @@ -9283,10 +9062,10 @@ // CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l144(i32 [[TMP42]], [10 x float]* [[B]], i32 [[TMP1]], float* [[VLA]], [5 x [10 x double]]* [[C]], i32 5, i32 [[TMP3]], double* [[VLA1]], %struct.TT* [[D]]) #[[ATTR4]] // CHECK20-NEXT: br label [[OMP_IF_END19]] // CHECK20: omp_if.end19: -// CHECK20-NEXT: [[TMP108:%.*]] = load i32, i32* [[A]], align 4 -// CHECK20-NEXT: [[TMP109:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK20-NEXT: call void @llvm.stackrestore(i8* [[TMP109]]) -// CHECK20-NEXT: ret i32 [[TMP108]] +// CHECK20-NEXT: [[TMP98:%.*]] = load i32, i32* [[A]], align 4 +// CHECK20-NEXT: [[TMP99:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK20-NEXT: call void @llvm.stackrestore(i8* [[TMP99]]) +// CHECK20-NEXT: ret i32 [[TMP98]] // // // CHECK20-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l100 @@ -9615,7 +9394,6 @@ // CHECK20-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 4 // CHECK20-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 4 // CHECK20-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 4 -// CHECK20-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 4 // CHECK20-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 // CHECK20-NEXT: store i32 [[N]], i32* 
[[N_ADDR]], align 4 // CHECK20-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 @@ -9645,56 +9423,46 @@ // CHECK20-NEXT: [[TMP12:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK20-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to double** // CHECK20-NEXT: store double* [[A]], double** [[TMP13]], align 4 -// CHECK20-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK20-NEXT: store i64 8, i64* [[TMP14]], align 4 -// CHECK20-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK20-NEXT: store i8* null, i8** [[TMP15]], align 4 -// CHECK20-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK20-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32* -// CHECK20-NEXT: store i32 [[TMP5]], i32* [[TMP17]], align 4 -// CHECK20-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK20-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32* -// CHECK20-NEXT: store i32 [[TMP5]], i32* [[TMP19]], align 4 -// CHECK20-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK20-NEXT: store i64 4, i64* [[TMP20]], align 4 -// CHECK20-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK20-NEXT: store i8* null, i8** [[TMP21]], align 4 -// CHECK20-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK20-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK20-NEXT: store i8* null, i8** [[TMP14]], align 4 +// CHECK20-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK20-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i32* +// CHECK20-NEXT: store i32 [[TMP5]], i32* [[TMP16]], align 4 +// CHECK20-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK20-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32* +// CHECK20-NEXT: store i32 [[TMP5]], i32* [[TMP18]], align 4 +// CHECK20-NEXT: [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK20-NEXT: store i8* null, i8** [[TMP19]], align 4 +// CHECK20-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK20-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32* +// CHECK20-NEXT: store i32 2, i32* [[TMP21]], align 4 +// CHECK20-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK20-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i32* // CHECK20-NEXT: store i32 2, i32* [[TMP23]], align 4 -// CHECK20-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK20-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i32* -// CHECK20-NEXT: store i32 2, i32* [[TMP25]], align 4 -// CHECK20-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK20-NEXT: store i64 4, i64* [[TMP26]], align 4 -// CHECK20-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK20-NEXT: store i8* null, i8** [[TMP27]], align 4 -// 
CHECK20-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK20-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i32* -// CHECK20-NEXT: store i32 [[TMP1]], i32* [[TMP29]], align 4 -// CHECK20-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK20-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i32* -// CHECK20-NEXT: store i32 [[TMP1]], i32* [[TMP31]], align 4 -// CHECK20-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK20-NEXT: store i64 4, i64* [[TMP32]], align 4 -// CHECK20-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 -// CHECK20-NEXT: store i8* null, i8** [[TMP33]], align 4 -// CHECK20-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK20-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i16** -// CHECK20-NEXT: store i16* [[VLA]], i16** [[TMP35]], align 4 -// CHECK20-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK20-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i16** -// CHECK20-NEXT: store i16* [[VLA]], i16** [[TMP37]], align 4 -// CHECK20-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK20-NEXT: store i64 [[TMP9]], i64* [[TMP38]], align 4 -// CHECK20-NEXT: [[TMP39:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 -// CHECK20-NEXT: store i8* null, i8** [[TMP39]], align 4 -// CHECK20-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216.region_id, i32 5, i8** [[TMP40]], i8** [[TMP41]], i64* [[TMP42]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) -// CHECK20-NEXT: [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0 -// CHECK20-NEXT: br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK20-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK20-NEXT: store i8* null, i8** [[TMP24]], align 4 +// CHECK20-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK20-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i32* +// CHECK20-NEXT: store i32 [[TMP1]], i32* [[TMP26]], align 4 +// CHECK20-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK20-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i32* +// CHECK20-NEXT: store i32 [[TMP1]], i32* [[TMP28]], align 4 +// CHECK20-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 +// CHECK20-NEXT: store i8* null, i8** [[TMP29]], align 4 +// CHECK20-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK20-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i16** +// 
CHECK20-NEXT: store i16* [[VLA]], i16** [[TMP31]], align 4 +// CHECK20-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK20-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i16** +// CHECK20-NEXT: store i16* [[VLA]], i16** [[TMP33]], align 4 +// CHECK20-NEXT: store i64 [[TMP9]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.11, i32 0, i32 4), align 4 +// CHECK20-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 +// CHECK20-NEXT: store i8* null, i8** [[TMP34]], align 4 +// CHECK20-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP37:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216.region_id, i32 5, i8** [[TMP35]], i8** [[TMP36]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.11, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) +// CHECK20-NEXT: [[TMP38:%.*]] = icmp ne i32 [[TMP37]], 0 +// CHECK20-NEXT: br i1 [[TMP38]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK20: omp_offload.failed: // CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216(%struct.S1* [[THIS1]], i32 [[TMP5]], i32 2, i32 [[TMP1]], i16* [[VLA]]) #[[ATTR4]] // CHECK20-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -9704,15 +9472,15 @@ // CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216(%struct.S1* [[THIS1]], i32 [[TMP5]], i32 2, i32 [[TMP1]], i16* [[VLA]]) #[[ATTR4]] // CHECK20-NEXT: br label [[OMP_IF_END]] // CHECK20: omp_if.end: -// CHECK20-NEXT: [[TMP45:%.*]] = mul nsw i32 1, [[TMP1]] -// CHECK20-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP45]] +// CHECK20-NEXT: [[TMP39:%.*]] = mul nsw i32 1, [[TMP1]] +// CHECK20-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP39]] // CHECK20-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1 -// CHECK20-NEXT: [[TMP46:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2 -// CHECK20-NEXT: [[CONV:%.*]] = sext i16 [[TMP46]] to i32 -// CHECK20-NEXT: [[TMP47:%.*]] = load i32, i32* [[B]], align 4 -// CHECK20-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[TMP47]] -// CHECK20-NEXT: [[TMP48:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK20-NEXT: call void @llvm.stackrestore(i8* [[TMP48]]) +// CHECK20-NEXT: [[TMP40:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2 +// CHECK20-NEXT: [[CONV:%.*]] = sext i16 [[TMP40]] to i32 +// CHECK20-NEXT: [[TMP41:%.*]] = load i32, i32* [[B]], align 4 +// CHECK20-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[TMP41]] +// CHECK20-NEXT: [[TMP42:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK20-NEXT: call void @llvm.stackrestore(i8* [[TMP42]]) // CHECK20-NEXT: ret i32 [[ADD3]] // // @@ -9783,7 +9551,7 @@ // CHECK20-NEXT: store i8* null, i8** [[TMP26]], align 4 // CHECK20-NEXT: [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK20-NEXT: [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* 
@[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l198.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) +// CHECK20-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l198.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.14, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.15, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) // CHECK20-NEXT: [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0 // CHECK20-NEXT: br i1 [[TMP30]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK20: omp_offload.failed: @@ -9851,7 +9619,7 @@ // CHECK20-NEXT: store i8* null, i8** [[TMP19]], align 4 // CHECK20-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK20-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l181.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.15, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) +// CHECK20-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l181.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.17, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.18, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) // CHECK20-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0 // CHECK20-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK20: omp_offload.failed: @@ -9888,11 +9656,11 @@ // CHECK20-NEXT: [[TMP4:%.*]] = load i32, i32* [[B_ADDR]], align 4 // CHECK20-NEXT: store i32 [[TMP4]], i32* [[B_CASTED]], align 4 // CHECK20-NEXT: [[TMP5:%.*]] = load i32, i32* [[B_CASTED]], align 4 -// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], i16* [[TMP3]]) +// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], i16* [[TMP3]]) // CHECK20-NEXT: ret void // // -// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..9 +// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..10 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i32 noundef [[B:%.*]], i32 noundef [[VLA:%.*]], i32 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR3]] { // CHECK20-NEXT: entry: // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -9958,11 +9726,11 @@ // CHECK20-NEXT: [[CONV3:%.*]] = bitcast i32* [[AAA_CASTED]] to i8* // CHECK20-NEXT: store i8 [[TMP5]], i8* [[CONV3]], align 1 // CHECK20-NEXT: [[TMP6:%.*]] = load i32, i32* [[AAA_CASTED]], align 4 -// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, [10 x i32]*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], [10 x i32]* [[TMP0]]) +// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, [10 x i32]*)* @.omp_outlined..13 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], [10 x i32]* [[TMP0]]) // CHECK20-NEXT: ret void // // -// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..13 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], i32 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] { // CHECK20-NEXT: entry: // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -10020,11 +9788,11 @@ // CHECK20-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16* // CHECK20-NEXT: store i16 [[TMP3]], i16* [[CONV1]], align 2 // CHECK20-NEXT: [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4 -// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]]) +// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]]) // CHECK20-NEXT: ret void // // -// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..14 +// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..16 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] { // CHECK20-NEXT: entry: // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 diff --git a/clang/test/OpenMP/target_parallel_for_codegen.cpp b/clang/test/OpenMP/target_parallel_for_codegen.cpp --- a/clang/test/OpenMP/target_parallel_for_codegen.cpp +++ b/clang/test/OpenMP/target_parallel_for_codegen.cpp @@ -356,7 +356,6 @@ // CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS20:%.*]] = alloca [10 x i8*], align 8 // CHECK1-NEXT: [[DOTOFFLOAD_PTRS21:%.*]] = alloca [10 x i8*], align 8 // CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS22:%.*]] = alloca [10 x i8*], align 8 -// CHECK1-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [10 x i64], align 8 // CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]]) // CHECK1-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 // CHECK1-NEXT: store i32 0, i32* [[A]], align 4 @@ -526,106 +525,87 @@ // CHECK1-NEXT: [[TMP91:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0 // CHECK1-NEXT: [[TMP92:%.*]] = bitcast i8** [[TMP91]] to i64* // CHECK1-NEXT: store i64 [[TMP82]], i64* [[TMP92]], align 8 -// CHECK1-NEXT: [[TMP93:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK1-NEXT: store i64 4, i64* [[TMP93]], align 8 -// CHECK1-NEXT: [[TMP94:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 0 -// CHECK1-NEXT: store i8* null, i8** [[TMP94]], align 8 -// CHECK1-NEXT: [[TMP95:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 1 -// CHECK1-NEXT: [[TMP96:%.*]] = bitcast i8** [[TMP95]] to [10 x float]** -// CHECK1-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP96]], align 8 -// CHECK1-NEXT: [[TMP97:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 1 -// CHECK1-NEXT: [[TMP98:%.*]] = bitcast i8** [[TMP97]] to [10 x float]** -// CHECK1-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP98]], align 8 -// CHECK1-NEXT: [[TMP99:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK1-NEXT: store i64 40, i64* [[TMP99]], align 8 -// CHECK1-NEXT: [[TMP100:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 1 -// CHECK1-NEXT: store i8* null, i8** [[TMP100]], align 8 -// CHECK1-NEXT: [[TMP101:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 2 +// CHECK1-NEXT: [[TMP93:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 0 +// CHECK1-NEXT: store i8* null, i8** [[TMP93]], align 8 +// CHECK1-NEXT: [[TMP94:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 1 +// CHECK1-NEXT: [[TMP95:%.*]] = bitcast i8** [[TMP94]] to [10 x float]** +// CHECK1-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP95]], align 8 +// CHECK1-NEXT: [[TMP96:%.*]] = getelementptr 
inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 1 +// CHECK1-NEXT: [[TMP97:%.*]] = bitcast i8** [[TMP96]] to [10 x float]** +// CHECK1-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP97]], align 8 +// CHECK1-NEXT: [[TMP98:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 1 +// CHECK1-NEXT: store i8* null, i8** [[TMP98]], align 8 +// CHECK1-NEXT: [[TMP99:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 2 +// CHECK1-NEXT: [[TMP100:%.*]] = bitcast i8** [[TMP99]] to i64* +// CHECK1-NEXT: store i64 [[TMP2]], i64* [[TMP100]], align 8 +// CHECK1-NEXT: [[TMP101:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 2 // CHECK1-NEXT: [[TMP102:%.*]] = bitcast i8** [[TMP101]] to i64* // CHECK1-NEXT: store i64 [[TMP2]], i64* [[TMP102]], align 8 -// CHECK1-NEXT: [[TMP103:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 2 -// CHECK1-NEXT: [[TMP104:%.*]] = bitcast i8** [[TMP103]] to i64* -// CHECK1-NEXT: store i64 [[TMP2]], i64* [[TMP104]], align 8 -// CHECK1-NEXT: [[TMP105:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK1-NEXT: store i64 8, i64* [[TMP105]], align 8 -// CHECK1-NEXT: [[TMP106:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 2 -// CHECK1-NEXT: store i8* null, i8** [[TMP106]], align 8 -// CHECK1-NEXT: [[TMP107:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 3 -// CHECK1-NEXT: [[TMP108:%.*]] = bitcast i8** [[TMP107]] to float** -// CHECK1-NEXT: store float* [[VLA]], float** [[TMP108]], align 8 -// CHECK1-NEXT: [[TMP109:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 3 -// CHECK1-NEXT: [[TMP110:%.*]] = bitcast i8** [[TMP109]] to float** -// CHECK1-NEXT: store float* [[VLA]], float** [[TMP110]], align 8 -// CHECK1-NEXT: [[TMP111:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK1-NEXT: store i64 [[TMP86]], i64* [[TMP111]], align 8 -// CHECK1-NEXT: [[TMP112:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 3 -// CHECK1-NEXT: store i8* null, i8** [[TMP112]], align 8 -// CHECK1-NEXT: [[TMP113:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 4 -// CHECK1-NEXT: [[TMP114:%.*]] = bitcast i8** [[TMP113]] to [5 x [10 x double]]** -// CHECK1-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP114]], align 8 -// CHECK1-NEXT: [[TMP115:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 4 -// CHECK1-NEXT: [[TMP116:%.*]] = bitcast i8** [[TMP115]] to [5 x [10 x double]]** -// CHECK1-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP116]], align 8 -// CHECK1-NEXT: [[TMP117:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK1-NEXT: store i64 400, i64* [[TMP117]], align 8 -// CHECK1-NEXT: [[TMP118:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 4 +// CHECK1-NEXT: [[TMP103:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 2 +// CHECK1-NEXT: store i8* null, i8** [[TMP103]], align 8 +// CHECK1-NEXT: [[TMP104:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 3 +// CHECK1-NEXT: 
[[TMP105:%.*]] = bitcast i8** [[TMP104]] to float** +// CHECK1-NEXT: store float* [[VLA]], float** [[TMP105]], align 8 +// CHECK1-NEXT: [[TMP106:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 3 +// CHECK1-NEXT: [[TMP107:%.*]] = bitcast i8** [[TMP106]] to float** +// CHECK1-NEXT: store float* [[VLA]], float** [[TMP107]], align 8 +// CHECK1-NEXT: store i64 [[TMP86]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_sizes.8, i32 0, i32 3), align 8 +// CHECK1-NEXT: [[TMP108:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 3 +// CHECK1-NEXT: store i8* null, i8** [[TMP108]], align 8 +// CHECK1-NEXT: [[TMP109:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 4 +// CHECK1-NEXT: [[TMP110:%.*]] = bitcast i8** [[TMP109]] to [5 x [10 x double]]** +// CHECK1-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP110]], align 8 +// CHECK1-NEXT: [[TMP111:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 4 +// CHECK1-NEXT: [[TMP112:%.*]] = bitcast i8** [[TMP111]] to [5 x [10 x double]]** +// CHECK1-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP112]], align 8 +// CHECK1-NEXT: [[TMP113:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 4 +// CHECK1-NEXT: store i8* null, i8** [[TMP113]], align 8 +// CHECK1-NEXT: [[TMP114:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 5 +// CHECK1-NEXT: [[TMP115:%.*]] = bitcast i8** [[TMP114]] to i64* +// CHECK1-NEXT: store i64 5, i64* [[TMP115]], align 8 +// CHECK1-NEXT: [[TMP116:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 5 +// CHECK1-NEXT: [[TMP117:%.*]] = bitcast i8** [[TMP116]] to i64* +// CHECK1-NEXT: store i64 5, i64* [[TMP117]], align 8 +// CHECK1-NEXT: [[TMP118:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 5 // CHECK1-NEXT: store i8* null, i8** [[TMP118]], align 8 -// CHECK1-NEXT: [[TMP119:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 5 +// CHECK1-NEXT: [[TMP119:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 6 // CHECK1-NEXT: [[TMP120:%.*]] = bitcast i8** [[TMP119]] to i64* -// CHECK1-NEXT: store i64 5, i64* [[TMP120]], align 8 -// CHECK1-NEXT: [[TMP121:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 5 +// CHECK1-NEXT: store i64 [[TMP5]], i64* [[TMP120]], align 8 +// CHECK1-NEXT: [[TMP121:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 6 // CHECK1-NEXT: [[TMP122:%.*]] = bitcast i8** [[TMP121]] to i64* -// CHECK1-NEXT: store i64 5, i64* [[TMP122]], align 8 -// CHECK1-NEXT: [[TMP123:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5 -// CHECK1-NEXT: store i64 8, i64* [[TMP123]], align 8 -// CHECK1-NEXT: [[TMP124:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 5 -// CHECK1-NEXT: store i8* null, i8** [[TMP124]], align 8 -// CHECK1-NEXT: [[TMP125:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 6 -// CHECK1-NEXT: [[TMP126:%.*]] = bitcast i8** [[TMP125]] to i64* -// CHECK1-NEXT: store i64 [[TMP5]], i64* [[TMP126]], align 8 -// CHECK1-NEXT: [[TMP127:%.*]] = getelementptr inbounds [10 x i8*], 
[10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 6 -// CHECK1-NEXT: [[TMP128:%.*]] = bitcast i8** [[TMP127]] to i64* -// CHECK1-NEXT: store i64 [[TMP5]], i64* [[TMP128]], align 8 -// CHECK1-NEXT: [[TMP129:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6 -// CHECK1-NEXT: store i64 8, i64* [[TMP129]], align 8 -// CHECK1-NEXT: [[TMP130:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 6 -// CHECK1-NEXT: store i8* null, i8** [[TMP130]], align 8 -// CHECK1-NEXT: [[TMP131:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 7 -// CHECK1-NEXT: [[TMP132:%.*]] = bitcast i8** [[TMP131]] to double** -// CHECK1-NEXT: store double* [[VLA1]], double** [[TMP132]], align 8 -// CHECK1-NEXT: [[TMP133:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 7 -// CHECK1-NEXT: [[TMP134:%.*]] = bitcast i8** [[TMP133]] to double** -// CHECK1-NEXT: store double* [[VLA1]], double** [[TMP134]], align 8 -// CHECK1-NEXT: [[TMP135:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7 -// CHECK1-NEXT: store i64 [[TMP88]], i64* [[TMP135]], align 8 -// CHECK1-NEXT: [[TMP136:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 7 -// CHECK1-NEXT: store i8* null, i8** [[TMP136]], align 8 -// CHECK1-NEXT: [[TMP137:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 8 -// CHECK1-NEXT: [[TMP138:%.*]] = bitcast i8** [[TMP137]] to %struct.TT** -// CHECK1-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP138]], align 8 -// CHECK1-NEXT: [[TMP139:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 8 -// CHECK1-NEXT: [[TMP140:%.*]] = bitcast i8** [[TMP139]] to %struct.TT** -// CHECK1-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP140]], align 8 -// CHECK1-NEXT: [[TMP141:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 8 -// CHECK1-NEXT: store i64 16, i64* [[TMP141]], align 8 -// CHECK1-NEXT: [[TMP142:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 8 -// CHECK1-NEXT: store i8* null, i8** [[TMP142]], align 8 -// CHECK1-NEXT: [[TMP143:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 9 -// CHECK1-NEXT: [[TMP144:%.*]] = bitcast i8** [[TMP143]] to i64* -// CHECK1-NEXT: store i64 [[TMP84]], i64* [[TMP144]], align 8 -// CHECK1-NEXT: [[TMP145:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 9 -// CHECK1-NEXT: [[TMP146:%.*]] = bitcast i8** [[TMP145]] to i64* -// CHECK1-NEXT: store i64 [[TMP84]], i64* [[TMP146]], align 8 -// CHECK1-NEXT: [[TMP147:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 9 -// CHECK1-NEXT: store i64 4, i64* [[TMP147]], align 8 -// CHECK1-NEXT: [[TMP148:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 9 -// CHECK1-NEXT: store i8* null, i8** [[TMP148]], align 8 -// CHECK1-NEXT: [[TMP149:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP150:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP151:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP152:%.*]] = call i32 
@__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l170.region_id, i32 10, i8** [[TMP149]], i8** [[TMP150]], i64* [[TMP151]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_maptypes.8, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) -// CHECK1-NEXT: [[TMP153:%.*]] = icmp ne i32 [[TMP152]], 0 -// CHECK1-NEXT: br i1 [[TMP153]], label [[OMP_OFFLOAD_FAILED23:%.*]], label [[OMP_OFFLOAD_CONT24:%.*]] +// CHECK1-NEXT: store i64 [[TMP5]], i64* [[TMP122]], align 8 +// CHECK1-NEXT: [[TMP123:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 6 +// CHECK1-NEXT: store i8* null, i8** [[TMP123]], align 8 +// CHECK1-NEXT: [[TMP124:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 7 +// CHECK1-NEXT: [[TMP125:%.*]] = bitcast i8** [[TMP124]] to double** +// CHECK1-NEXT: store double* [[VLA1]], double** [[TMP125]], align 8 +// CHECK1-NEXT: [[TMP126:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 7 +// CHECK1-NEXT: [[TMP127:%.*]] = bitcast i8** [[TMP126]] to double** +// CHECK1-NEXT: store double* [[VLA1]], double** [[TMP127]], align 8 +// CHECK1-NEXT: store i64 [[TMP88]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_sizes.8, i32 0, i32 7), align 8 +// CHECK1-NEXT: [[TMP128:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 7 +// CHECK1-NEXT: store i8* null, i8** [[TMP128]], align 8 +// CHECK1-NEXT: [[TMP129:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 8 +// CHECK1-NEXT: [[TMP130:%.*]] = bitcast i8** [[TMP129]] to %struct.TT** +// CHECK1-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP130]], align 8 +// CHECK1-NEXT: [[TMP131:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 8 +// CHECK1-NEXT: [[TMP132:%.*]] = bitcast i8** [[TMP131]] to %struct.TT** +// CHECK1-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP132]], align 8 +// CHECK1-NEXT: [[TMP133:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 8 +// CHECK1-NEXT: store i8* null, i8** [[TMP133]], align 8 +// CHECK1-NEXT: [[TMP134:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 9 +// CHECK1-NEXT: [[TMP135:%.*]] = bitcast i8** [[TMP134]] to i64* +// CHECK1-NEXT: store i64 [[TMP84]], i64* [[TMP135]], align 8 +// CHECK1-NEXT: [[TMP136:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 9 +// CHECK1-NEXT: [[TMP137:%.*]] = bitcast i8** [[TMP136]] to i64* +// CHECK1-NEXT: store i64 [[TMP84]], i64* [[TMP137]], align 8 +// CHECK1-NEXT: [[TMP138:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 9 +// CHECK1-NEXT: store i8* null, i8** [[TMP138]], align 8 +// CHECK1-NEXT: [[TMP139:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP140:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP141:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l170.region_id, i32 10, i8** [[TMP139]], i8** [[TMP140]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([10 x i64], [10 x i64]* 
@.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) +// CHECK1-NEXT: [[TMP142:%.*]] = icmp ne i32 [[TMP141]], 0 +// CHECK1-NEXT: br i1 [[TMP142]], label [[OMP_OFFLOAD_FAILED23:%.*]], label [[OMP_OFFLOAD_CONT24:%.*]] // CHECK1: omp_offload.failed23: // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l170(i64 [[TMP82]], [10 x float]* [[B]], i64 [[TMP2]], float* [[VLA]], [5 x [10 x double]]* [[C]], i64 5, i64 [[TMP5]], double* [[VLA1]], %struct.TT* [[D]], i64 [[TMP84]]) #[[ATTR4]] // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT24]] @@ -635,10 +615,10 @@ // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l170(i64 [[TMP82]], [10 x float]* [[B]], i64 [[TMP2]], float* [[VLA]], [5 x [10 x double]]* [[C]], i64 5, i64 [[TMP5]], double* [[VLA1]], %struct.TT* [[D]], i64 [[TMP84]]) #[[ATTR4]] // CHECK1-NEXT: br label [[OMP_IF_END26]] // CHECK1: omp_if.end26: -// CHECK1-NEXT: [[TMP154:%.*]] = load i32, i32* [[A]], align 4 -// CHECK1-NEXT: [[TMP155:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK1-NEXT: call void @llvm.stackrestore(i8* [[TMP155]]) -// CHECK1-NEXT: ret i32 [[TMP154]] +// CHECK1-NEXT: [[TMP143:%.*]] = load i32, i32* [[A]], align 4 +// CHECK1-NEXT: [[TMP144:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK1-NEXT: call void @llvm.stackrestore(i8* [[TMP144]]) +// CHECK1-NEXT: ret i32 [[TMP143]] // // // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l103 @@ -1411,7 +1391,6 @@ // CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 8 // CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 8 // CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 8 -// CHECK1-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 8 // CHECK1-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 // CHECK1-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8 @@ -1442,56 +1421,46 @@ // CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK1-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to double** // CHECK1-NEXT: store double* [[A]], double** [[TMP13]], align 8 -// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK1-NEXT: store i64 8, i64* [[TMP14]], align 8 -// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK1-NEXT: store i8* null, i8** [[TMP15]], align 8 -// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK1-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i64* -// CHECK1-NEXT: store i64 [[TMP6]], i64* [[TMP17]], align 8 -// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK1-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i64* -// CHECK1-NEXT: store i64 [[TMP6]], i64* [[TMP19]], align 8 -// CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK1-NEXT: store i64 4, i64* [[TMP20]], align 8 -// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK1-NEXT: store i8* null, i8** [[TMP21]], align 8 -// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 
2 +// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK1-NEXT: store i8* null, i8** [[TMP14]], align 8 +// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK1-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i64* +// CHECK1-NEXT: store i64 [[TMP6]], i64* [[TMP16]], align 8 +// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK1-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i64* +// CHECK1-NEXT: store i64 [[TMP6]], i64* [[TMP18]], align 8 +// CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK1-NEXT: store i8* null, i8** [[TMP19]], align 8 +// CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK1-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i64* +// CHECK1-NEXT: store i64 2, i64* [[TMP21]], align 8 +// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK1-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i64* // CHECK1-NEXT: store i64 2, i64* [[TMP23]], align 8 -// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK1-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i64* -// CHECK1-NEXT: store i64 2, i64* [[TMP25]], align 8 -// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK1-NEXT: store i64 8, i64* [[TMP26]], align 8 -// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK1-NEXT: store i8* null, i8** [[TMP27]], align 8 -// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK1-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i64* -// CHECK1-NEXT: store i64 [[TMP2]], i64* [[TMP29]], align 8 -// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK1-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i64* -// CHECK1-NEXT: store i64 [[TMP2]], i64* [[TMP31]], align 8 -// CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK1-NEXT: store i64 8, i64* [[TMP32]], align 8 -// CHECK1-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 -// CHECK1-NEXT: store i8* null, i8** [[TMP33]], align 8 -// CHECK1-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK1-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i16** -// CHECK1-NEXT: store i16* [[VLA]], i16** [[TMP35]], align 8 -// CHECK1-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK1-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i16** -// CHECK1-NEXT: store i16* [[VLA]], i16** [[TMP37]], align 8 -// CHECK1-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK1-NEXT: store i64 [[TMP9]], i64* [[TMP38]], align 8 -// CHECK1-NEXT: [[TMP39:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 -// CHECK1-NEXT: store i8* null, i8** [[TMP39]], align 8 -// CHECK1-NEXT: [[TMP40:%.*]] = getelementptr 
inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l242.region_id, i32 5, i8** [[TMP40]], i8** [[TMP41]], i64* [[TMP42]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) -// CHECK1-NEXT: [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0 -// CHECK1-NEXT: br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK1-NEXT: store i8* null, i8** [[TMP24]], align 8 +// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK1-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i64* +// CHECK1-NEXT: store i64 [[TMP2]], i64* [[TMP26]], align 8 +// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK1-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i64* +// CHECK1-NEXT: store i64 [[TMP2]], i64* [[TMP28]], align 8 +// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 +// CHECK1-NEXT: store i8* null, i8** [[TMP29]], align 8 +// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK1-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i16** +// CHECK1-NEXT: store i16* [[VLA]], i16** [[TMP31]], align 8 +// CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK1-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i16** +// CHECK1-NEXT: store i16* [[VLA]], i16** [[TMP33]], align 8 +// CHECK1-NEXT: store i64 [[TMP9]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.11, i32 0, i32 4), align 8 +// CHECK1-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 +// CHECK1-NEXT: store i8* null, i8** [[TMP34]], align 8 +// CHECK1-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP37:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l242.region_id, i32 5, i8** [[TMP35]], i8** [[TMP36]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.11, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) +// CHECK1-NEXT: [[TMP38:%.*]] = icmp ne i32 [[TMP37]], 0 +// CHECK1-NEXT: br i1 [[TMP38]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK1: omp_offload.failed: // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l242(%struct.S1* [[THIS1]], i64 [[TMP6]], i64 2, i64 [[TMP2]], i16* [[VLA]]) #[[ATTR4]] // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -1501,15 +1470,15 @@ // CHECK1-NEXT: call void 
@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l242(%struct.S1* [[THIS1]], i64 [[TMP6]], i64 2, i64 [[TMP2]], i16* [[VLA]]) #[[ATTR4]] // CHECK1-NEXT: br label [[OMP_IF_END]] // CHECK1: omp_if.end: -// CHECK1-NEXT: [[TMP45:%.*]] = mul nsw i64 1, [[TMP2]] -// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP45]] +// CHECK1-NEXT: [[TMP39:%.*]] = mul nsw i64 1, [[TMP2]] +// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP39]] // CHECK1-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1 -// CHECK1-NEXT: [[TMP46:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2 -// CHECK1-NEXT: [[CONV3:%.*]] = sext i16 [[TMP46]] to i32 -// CHECK1-NEXT: [[TMP47:%.*]] = load i32, i32* [[B]], align 4 -// CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], [[TMP47]] -// CHECK1-NEXT: [[TMP48:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK1-NEXT: call void @llvm.stackrestore(i8* [[TMP48]]) +// CHECK1-NEXT: [[TMP40:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2 +// CHECK1-NEXT: [[CONV3:%.*]] = sext i16 [[TMP40]] to i32 +// CHECK1-NEXT: [[TMP41:%.*]] = load i32, i32* [[B]], align 4 +// CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], [[TMP41]] +// CHECK1-NEXT: [[TMP42:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK1-NEXT: call void @llvm.stackrestore(i8* [[TMP42]]) // CHECK1-NEXT: ret i32 [[ADD4]] // // @@ -1581,7 +1550,7 @@ // CHECK1-NEXT: store i8* null, i8** [[TMP26]], align 8 // CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l224.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) +// CHECK1-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l224.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.14, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.15, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) // CHECK1-NEXT: [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0 // CHECK1-NEXT: br i1 [[TMP30]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK1: omp_offload.failed: @@ -1650,7 +1619,7 @@ // CHECK1-NEXT: store i8* null, i8** [[TMP19]], align 8 // CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l207.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.15, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) +// CHECK1-NEXT: [[TMP22:%.*]] = call i32 
@__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l207.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.17, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.18, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) // CHECK1-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0 // CHECK1-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK1: omp_offload.failed: @@ -1689,11 +1658,11 @@ // CHECK1-NEXT: [[CONV3:%.*]] = bitcast i64* [[B_CASTED]] to i32* // CHECK1-NEXT: store i32 [[TMP4]], i32* [[CONV3]], align 4 // CHECK1-NEXT: [[TMP5:%.*]] = load i64, i64* [[B_CASTED]], align 8 -// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], i16* [[TMP3]]) +// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], i16* [[TMP3]]) // CHECK1-NEXT: ret void // // -// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..9 +// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..10 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i64 noundef [[B:%.*]], i64 noundef [[VLA:%.*]], i64 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR3]] { // CHECK1-NEXT: entry: // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -1812,11 +1781,11 @@ // CHECK1-NEXT: [[CONV5:%.*]] = bitcast i64* [[AAA_CASTED]] to i8* // CHECK1-NEXT: store i8 [[TMP5]], i8* [[CONV5]], align 1 // CHECK1-NEXT: [[TMP6:%.*]] = load i64, i64* [[AAA_CASTED]], align 8 -// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, [10 x i32]*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], [10 x i32]* [[TMP0]]) +// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, [10 x i32]*)* @.omp_outlined..13 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], [10 x i32]* [[TMP0]]) // CHECK1-NEXT: ret void // // -// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..13 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], i64 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] { // CHECK1-NEXT: entry: // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -1862,11 +1831,11 @@ // CHECK1-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16* // CHECK1-NEXT: store i16 [[TMP3]], i16* [[CONV3]], align 2 // CHECK1-NEXT: [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8 -// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]]) +// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]]) // CHECK1-NEXT: ret void // // -// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..14 +// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..16 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] { // CHECK1-NEXT: entry: // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -1994,7 +1963,6 @@ // CHECK2-NEXT: [[DOTOFFLOAD_BASEPTRS20:%.*]] = alloca [10 x i8*], align 8 // CHECK2-NEXT: [[DOTOFFLOAD_PTRS21:%.*]] = alloca [10 x i8*], align 8 // CHECK2-NEXT: [[DOTOFFLOAD_MAPPERS22:%.*]] = alloca [10 x i8*], align 8 -// CHECK2-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [10 x i64], align 8 // CHECK2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]]) // CHECK2-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 // CHECK2-NEXT: store i32 0, i32* [[A]], align 4 @@ -2164,106 +2132,87 @@ // CHECK2-NEXT: [[TMP91:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0 // CHECK2-NEXT: [[TMP92:%.*]] = bitcast i8** [[TMP91]] to i64* // CHECK2-NEXT: store i64 [[TMP82]], i64* [[TMP92]], align 8 -// CHECK2-NEXT: [[TMP93:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK2-NEXT: store i64 4, i64* [[TMP93]], align 8 -// CHECK2-NEXT: [[TMP94:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 0 -// CHECK2-NEXT: store i8* null, i8** [[TMP94]], align 8 -// CHECK2-NEXT: [[TMP95:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 1 -// CHECK2-NEXT: [[TMP96:%.*]] = bitcast i8** [[TMP95]] to [10 x float]** -// CHECK2-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP96]], align 8 -// CHECK2-NEXT: [[TMP97:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 1 -// 
CHECK2-NEXT: [[TMP98:%.*]] = bitcast i8** [[TMP97]] to [10 x float]** -// CHECK2-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP98]], align 8 -// CHECK2-NEXT: [[TMP99:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK2-NEXT: store i64 40, i64* [[TMP99]], align 8 -// CHECK2-NEXT: [[TMP100:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 1 -// CHECK2-NEXT: store i8* null, i8** [[TMP100]], align 8 -// CHECK2-NEXT: [[TMP101:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 2 +// CHECK2-NEXT: [[TMP93:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 0 +// CHECK2-NEXT: store i8* null, i8** [[TMP93]], align 8 +// CHECK2-NEXT: [[TMP94:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 1 +// CHECK2-NEXT: [[TMP95:%.*]] = bitcast i8** [[TMP94]] to [10 x float]** +// CHECK2-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP95]], align 8 +// CHECK2-NEXT: [[TMP96:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 1 +// CHECK2-NEXT: [[TMP97:%.*]] = bitcast i8** [[TMP96]] to [10 x float]** +// CHECK2-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP97]], align 8 +// CHECK2-NEXT: [[TMP98:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 1 +// CHECK2-NEXT: store i8* null, i8** [[TMP98]], align 8 +// CHECK2-NEXT: [[TMP99:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 2 +// CHECK2-NEXT: [[TMP100:%.*]] = bitcast i8** [[TMP99]] to i64* +// CHECK2-NEXT: store i64 [[TMP2]], i64* [[TMP100]], align 8 +// CHECK2-NEXT: [[TMP101:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 2 // CHECK2-NEXT: [[TMP102:%.*]] = bitcast i8** [[TMP101]] to i64* // CHECK2-NEXT: store i64 [[TMP2]], i64* [[TMP102]], align 8 -// CHECK2-NEXT: [[TMP103:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 2 -// CHECK2-NEXT: [[TMP104:%.*]] = bitcast i8** [[TMP103]] to i64* -// CHECK2-NEXT: store i64 [[TMP2]], i64* [[TMP104]], align 8 -// CHECK2-NEXT: [[TMP105:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK2-NEXT: store i64 8, i64* [[TMP105]], align 8 -// CHECK2-NEXT: [[TMP106:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 2 -// CHECK2-NEXT: store i8* null, i8** [[TMP106]], align 8 -// CHECK2-NEXT: [[TMP107:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 3 -// CHECK2-NEXT: [[TMP108:%.*]] = bitcast i8** [[TMP107]] to float** -// CHECK2-NEXT: store float* [[VLA]], float** [[TMP108]], align 8 -// CHECK2-NEXT: [[TMP109:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 3 -// CHECK2-NEXT: [[TMP110:%.*]] = bitcast i8** [[TMP109]] to float** -// CHECK2-NEXT: store float* [[VLA]], float** [[TMP110]], align 8 -// CHECK2-NEXT: [[TMP111:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK2-NEXT: store i64 [[TMP86]], i64* [[TMP111]], align 8 -// CHECK2-NEXT: [[TMP112:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 3 -// CHECK2-NEXT: store i8* null, i8** [[TMP112]], align 8 -// CHECK2-NEXT: [[TMP113:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* 
[[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 4 -// CHECK2-NEXT: [[TMP114:%.*]] = bitcast i8** [[TMP113]] to [5 x [10 x double]]** -// CHECK2-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP114]], align 8 -// CHECK2-NEXT: [[TMP115:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 4 -// CHECK2-NEXT: [[TMP116:%.*]] = bitcast i8** [[TMP115]] to [5 x [10 x double]]** -// CHECK2-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP116]], align 8 -// CHECK2-NEXT: [[TMP117:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK2-NEXT: store i64 400, i64* [[TMP117]], align 8 -// CHECK2-NEXT: [[TMP118:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 4 +// CHECK2-NEXT: [[TMP103:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 2 +// CHECK2-NEXT: store i8* null, i8** [[TMP103]], align 8 +// CHECK2-NEXT: [[TMP104:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 3 +// CHECK2-NEXT: [[TMP105:%.*]] = bitcast i8** [[TMP104]] to float** +// CHECK2-NEXT: store float* [[VLA]], float** [[TMP105]], align 8 +// CHECK2-NEXT: [[TMP106:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 3 +// CHECK2-NEXT: [[TMP107:%.*]] = bitcast i8** [[TMP106]] to float** +// CHECK2-NEXT: store float* [[VLA]], float** [[TMP107]], align 8 +// CHECK2-NEXT: store i64 [[TMP86]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_sizes.8, i32 0, i32 3), align 8 +// CHECK2-NEXT: [[TMP108:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 3 +// CHECK2-NEXT: store i8* null, i8** [[TMP108]], align 8 +// CHECK2-NEXT: [[TMP109:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 4 +// CHECK2-NEXT: [[TMP110:%.*]] = bitcast i8** [[TMP109]] to [5 x [10 x double]]** +// CHECK2-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP110]], align 8 +// CHECK2-NEXT: [[TMP111:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 4 +// CHECK2-NEXT: [[TMP112:%.*]] = bitcast i8** [[TMP111]] to [5 x [10 x double]]** +// CHECK2-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP112]], align 8 +// CHECK2-NEXT: [[TMP113:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 4 +// CHECK2-NEXT: store i8* null, i8** [[TMP113]], align 8 +// CHECK2-NEXT: [[TMP114:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 5 +// CHECK2-NEXT: [[TMP115:%.*]] = bitcast i8** [[TMP114]] to i64* +// CHECK2-NEXT: store i64 5, i64* [[TMP115]], align 8 +// CHECK2-NEXT: [[TMP116:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 5 +// CHECK2-NEXT: [[TMP117:%.*]] = bitcast i8** [[TMP116]] to i64* +// CHECK2-NEXT: store i64 5, i64* [[TMP117]], align 8 +// CHECK2-NEXT: [[TMP118:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 5 // CHECK2-NEXT: store i8* null, i8** [[TMP118]], align 8 -// CHECK2-NEXT: [[TMP119:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 5 +// CHECK2-NEXT: [[TMP119:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 6 // CHECK2-NEXT: [[TMP120:%.*]] = bitcast i8** [[TMP119]] to i64* -// CHECK2-NEXT: 
store i64 5, i64* [[TMP120]], align 8 -// CHECK2-NEXT: [[TMP121:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 5 +// CHECK2-NEXT: store i64 [[TMP5]], i64* [[TMP120]], align 8 +// CHECK2-NEXT: [[TMP121:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 6 // CHECK2-NEXT: [[TMP122:%.*]] = bitcast i8** [[TMP121]] to i64* -// CHECK2-NEXT: store i64 5, i64* [[TMP122]], align 8 -// CHECK2-NEXT: [[TMP123:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5 -// CHECK2-NEXT: store i64 8, i64* [[TMP123]], align 8 -// CHECK2-NEXT: [[TMP124:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 5 -// CHECK2-NEXT: store i8* null, i8** [[TMP124]], align 8 -// CHECK2-NEXT: [[TMP125:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 6 -// CHECK2-NEXT: [[TMP126:%.*]] = bitcast i8** [[TMP125]] to i64* -// CHECK2-NEXT: store i64 [[TMP5]], i64* [[TMP126]], align 8 -// CHECK2-NEXT: [[TMP127:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 6 -// CHECK2-NEXT: [[TMP128:%.*]] = bitcast i8** [[TMP127]] to i64* -// CHECK2-NEXT: store i64 [[TMP5]], i64* [[TMP128]], align 8 -// CHECK2-NEXT: [[TMP129:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6 -// CHECK2-NEXT: store i64 8, i64* [[TMP129]], align 8 -// CHECK2-NEXT: [[TMP130:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 6 -// CHECK2-NEXT: store i8* null, i8** [[TMP130]], align 8 -// CHECK2-NEXT: [[TMP131:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 7 -// CHECK2-NEXT: [[TMP132:%.*]] = bitcast i8** [[TMP131]] to double** -// CHECK2-NEXT: store double* [[VLA1]], double** [[TMP132]], align 8 -// CHECK2-NEXT: [[TMP133:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 7 -// CHECK2-NEXT: [[TMP134:%.*]] = bitcast i8** [[TMP133]] to double** -// CHECK2-NEXT: store double* [[VLA1]], double** [[TMP134]], align 8 -// CHECK2-NEXT: [[TMP135:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7 -// CHECK2-NEXT: store i64 [[TMP88]], i64* [[TMP135]], align 8 -// CHECK2-NEXT: [[TMP136:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 7 -// CHECK2-NEXT: store i8* null, i8** [[TMP136]], align 8 -// CHECK2-NEXT: [[TMP137:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 8 -// CHECK2-NEXT: [[TMP138:%.*]] = bitcast i8** [[TMP137]] to %struct.TT** -// CHECK2-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP138]], align 8 -// CHECK2-NEXT: [[TMP139:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 8 -// CHECK2-NEXT: [[TMP140:%.*]] = bitcast i8** [[TMP139]] to %struct.TT** -// CHECK2-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP140]], align 8 -// CHECK2-NEXT: [[TMP141:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 8 -// CHECK2-NEXT: store i64 16, i64* [[TMP141]], align 8 -// CHECK2-NEXT: [[TMP142:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 8 -// CHECK2-NEXT: store i8* null, i8** [[TMP142]], align 8 -// CHECK2-NEXT: [[TMP143:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 9 -// CHECK2-NEXT: 
[[TMP144:%.*]] = bitcast i8** [[TMP143]] to i64* -// CHECK2-NEXT: store i64 [[TMP84]], i64* [[TMP144]], align 8 -// CHECK2-NEXT: [[TMP145:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 9 -// CHECK2-NEXT: [[TMP146:%.*]] = bitcast i8** [[TMP145]] to i64* -// CHECK2-NEXT: store i64 [[TMP84]], i64* [[TMP146]], align 8 -// CHECK2-NEXT: [[TMP147:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 9 -// CHECK2-NEXT: store i64 4, i64* [[TMP147]], align 8 -// CHECK2-NEXT: [[TMP148:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 9 -// CHECK2-NEXT: store i8* null, i8** [[TMP148]], align 8 -// CHECK2-NEXT: [[TMP149:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0 -// CHECK2-NEXT: [[TMP150:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0 -// CHECK2-NEXT: [[TMP151:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK2-NEXT: [[TMP152:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l170.region_id, i32 10, i8** [[TMP149]], i8** [[TMP150]], i64* [[TMP151]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_maptypes.8, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) -// CHECK2-NEXT: [[TMP153:%.*]] = icmp ne i32 [[TMP152]], 0 -// CHECK2-NEXT: br i1 [[TMP153]], label [[OMP_OFFLOAD_FAILED23:%.*]], label [[OMP_OFFLOAD_CONT24:%.*]] +// CHECK2-NEXT: store i64 [[TMP5]], i64* [[TMP122]], align 8 +// CHECK2-NEXT: [[TMP123:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 6 +// CHECK2-NEXT: store i8* null, i8** [[TMP123]], align 8 +// CHECK2-NEXT: [[TMP124:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 7 +// CHECK2-NEXT: [[TMP125:%.*]] = bitcast i8** [[TMP124]] to double** +// CHECK2-NEXT: store double* [[VLA1]], double** [[TMP125]], align 8 +// CHECK2-NEXT: [[TMP126:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 7 +// CHECK2-NEXT: [[TMP127:%.*]] = bitcast i8** [[TMP126]] to double** +// CHECK2-NEXT: store double* [[VLA1]], double** [[TMP127]], align 8 +// CHECK2-NEXT: store i64 [[TMP88]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_sizes.8, i32 0, i32 7), align 8 +// CHECK2-NEXT: [[TMP128:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 7 +// CHECK2-NEXT: store i8* null, i8** [[TMP128]], align 8 +// CHECK2-NEXT: [[TMP129:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 8 +// CHECK2-NEXT: [[TMP130:%.*]] = bitcast i8** [[TMP129]] to %struct.TT** +// CHECK2-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP130]], align 8 +// CHECK2-NEXT: [[TMP131:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 8 +// CHECK2-NEXT: [[TMP132:%.*]] = bitcast i8** [[TMP131]] to %struct.TT** +// CHECK2-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP132]], align 8 +// CHECK2-NEXT: [[TMP133:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 8 +// CHECK2-NEXT: store i8* null, i8** [[TMP133]], align 8 +// CHECK2-NEXT: [[TMP134:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 9 +// CHECK2-NEXT: [[TMP135:%.*]] = bitcast i8** [[TMP134]] to 
i64* +// CHECK2-NEXT: store i64 [[TMP84]], i64* [[TMP135]], align 8 +// CHECK2-NEXT: [[TMP136:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 9 +// CHECK2-NEXT: [[TMP137:%.*]] = bitcast i8** [[TMP136]] to i64* +// CHECK2-NEXT: store i64 [[TMP84]], i64* [[TMP137]], align 8 +// CHECK2-NEXT: [[TMP138:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 9 +// CHECK2-NEXT: store i8* null, i8** [[TMP138]], align 8 +// CHECK2-NEXT: [[TMP139:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0 +// CHECK2-NEXT: [[TMP140:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0 +// CHECK2-NEXT: [[TMP141:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l170.region_id, i32 10, i8** [[TMP139]], i8** [[TMP140]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) +// CHECK2-NEXT: [[TMP142:%.*]] = icmp ne i32 [[TMP141]], 0 +// CHECK2-NEXT: br i1 [[TMP142]], label [[OMP_OFFLOAD_FAILED23:%.*]], label [[OMP_OFFLOAD_CONT24:%.*]] // CHECK2: omp_offload.failed23: // CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l170(i64 [[TMP82]], [10 x float]* [[B]], i64 [[TMP2]], float* [[VLA]], [5 x [10 x double]]* [[C]], i64 5, i64 [[TMP5]], double* [[VLA1]], %struct.TT* [[D]], i64 [[TMP84]]) #[[ATTR4]] // CHECK2-NEXT: br label [[OMP_OFFLOAD_CONT24]] @@ -2273,10 +2222,10 @@ // CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l170(i64 [[TMP82]], [10 x float]* [[B]], i64 [[TMP2]], float* [[VLA]], [5 x [10 x double]]* [[C]], i64 5, i64 [[TMP5]], double* [[VLA1]], %struct.TT* [[D]], i64 [[TMP84]]) #[[ATTR4]] // CHECK2-NEXT: br label [[OMP_IF_END26]] // CHECK2: omp_if.end26: -// CHECK2-NEXT: [[TMP154:%.*]] = load i32, i32* [[A]], align 4 -// CHECK2-NEXT: [[TMP155:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK2-NEXT: call void @llvm.stackrestore(i8* [[TMP155]]) -// CHECK2-NEXT: ret i32 [[TMP154]] +// CHECK2-NEXT: [[TMP143:%.*]] = load i32, i32* [[A]], align 4 +// CHECK2-NEXT: [[TMP144:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK2-NEXT: call void @llvm.stackrestore(i8* [[TMP144]]) +// CHECK2-NEXT: ret i32 [[TMP143]] // // // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l103 @@ -3049,7 +2998,6 @@ // CHECK2-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 8 // CHECK2-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 8 // CHECK2-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 8 -// CHECK2-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 8 // CHECK2-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 // CHECK2-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 // CHECK2-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8 @@ -3080,56 +3028,46 @@ // CHECK2-NEXT: [[TMP12:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK2-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to double** // CHECK2-NEXT: store double* [[A]], double** [[TMP13]], align 8 -// CHECK2-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK2-NEXT: store i64 8, i64* [[TMP14]], align 8 -// 
CHECK2-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK2-NEXT: store i8* null, i8** [[TMP15]], align 8 -// CHECK2-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK2-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i64* -// CHECK2-NEXT: store i64 [[TMP6]], i64* [[TMP17]], align 8 -// CHECK2-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK2-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i64* -// CHECK2-NEXT: store i64 [[TMP6]], i64* [[TMP19]], align 8 -// CHECK2-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK2-NEXT: store i64 4, i64* [[TMP20]], align 8 -// CHECK2-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK2-NEXT: store i8* null, i8** [[TMP21]], align 8 -// CHECK2-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK2-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK2-NEXT: store i8* null, i8** [[TMP14]], align 8 +// CHECK2-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK2-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i64* +// CHECK2-NEXT: store i64 [[TMP6]], i64* [[TMP16]], align 8 +// CHECK2-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK2-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i64* +// CHECK2-NEXT: store i64 [[TMP6]], i64* [[TMP18]], align 8 +// CHECK2-NEXT: [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK2-NEXT: store i8* null, i8** [[TMP19]], align 8 +// CHECK2-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK2-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i64* +// CHECK2-NEXT: store i64 2, i64* [[TMP21]], align 8 +// CHECK2-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK2-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i64* // CHECK2-NEXT: store i64 2, i64* [[TMP23]], align 8 -// CHECK2-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK2-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i64* -// CHECK2-NEXT: store i64 2, i64* [[TMP25]], align 8 -// CHECK2-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK2-NEXT: store i64 8, i64* [[TMP26]], align 8 -// CHECK2-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK2-NEXT: store i8* null, i8** [[TMP27]], align 8 -// CHECK2-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK2-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i64* -// CHECK2-NEXT: store i64 [[TMP2]], i64* [[TMP29]], align 8 -// CHECK2-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK2-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i64* -// CHECK2-NEXT: store i64 [[TMP2]], i64* [[TMP31]], align 8 -// CHECK2-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// 
CHECK2-NEXT: store i64 8, i64* [[TMP32]], align 8 -// CHECK2-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 -// CHECK2-NEXT: store i8* null, i8** [[TMP33]], align 8 -// CHECK2-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK2-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i16** -// CHECK2-NEXT: store i16* [[VLA]], i16** [[TMP35]], align 8 -// CHECK2-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK2-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i16** -// CHECK2-NEXT: store i16* [[VLA]], i16** [[TMP37]], align 8 -// CHECK2-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK2-NEXT: store i64 [[TMP9]], i64* [[TMP38]], align 8 -// CHECK2-NEXT: [[TMP39:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 -// CHECK2-NEXT: store i8* null, i8** [[TMP39]], align 8 -// CHECK2-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK2-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK2-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK2-NEXT: [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l242.region_id, i32 5, i8** [[TMP40]], i8** [[TMP41]], i64* [[TMP42]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) -// CHECK2-NEXT: [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0 -// CHECK2-NEXT: br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK2-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK2-NEXT: store i8* null, i8** [[TMP24]], align 8 +// CHECK2-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK2-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i64* +// CHECK2-NEXT: store i64 [[TMP2]], i64* [[TMP26]], align 8 +// CHECK2-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK2-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i64* +// CHECK2-NEXT: store i64 [[TMP2]], i64* [[TMP28]], align 8 +// CHECK2-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 +// CHECK2-NEXT: store i8* null, i8** [[TMP29]], align 8 +// CHECK2-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK2-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i16** +// CHECK2-NEXT: store i16* [[VLA]], i16** [[TMP31]], align 8 +// CHECK2-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK2-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i16** +// CHECK2-NEXT: store i16* [[VLA]], i16** [[TMP33]], align 8 +// CHECK2-NEXT: store i64 [[TMP9]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.11, i32 0, i32 4), align 8 +// CHECK2-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 +// CHECK2-NEXT: store i8* null, i8** [[TMP34]], align 8 +// CHECK2-NEXT: [[TMP35:%.*]] = 
getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK2-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK2-NEXT: [[TMP37:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l242.region_id, i32 5, i8** [[TMP35]], i8** [[TMP36]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.11, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) +// CHECK2-NEXT: [[TMP38:%.*]] = icmp ne i32 [[TMP37]], 0 +// CHECK2-NEXT: br i1 [[TMP38]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK2: omp_offload.failed: // CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l242(%struct.S1* [[THIS1]], i64 [[TMP6]], i64 2, i64 [[TMP2]], i16* [[VLA]]) #[[ATTR4]] // CHECK2-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -3139,15 +3077,15 @@ // CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l242(%struct.S1* [[THIS1]], i64 [[TMP6]], i64 2, i64 [[TMP2]], i16* [[VLA]]) #[[ATTR4]] // CHECK2-NEXT: br label [[OMP_IF_END]] // CHECK2: omp_if.end: -// CHECK2-NEXT: [[TMP45:%.*]] = mul nsw i64 1, [[TMP2]] -// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP45]] +// CHECK2-NEXT: [[TMP39:%.*]] = mul nsw i64 1, [[TMP2]] +// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP39]] // CHECK2-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1 -// CHECK2-NEXT: [[TMP46:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2 -// CHECK2-NEXT: [[CONV3:%.*]] = sext i16 [[TMP46]] to i32 -// CHECK2-NEXT: [[TMP47:%.*]] = load i32, i32* [[B]], align 4 -// CHECK2-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], [[TMP47]] -// CHECK2-NEXT: [[TMP48:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK2-NEXT: call void @llvm.stackrestore(i8* [[TMP48]]) +// CHECK2-NEXT: [[TMP40:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2 +// CHECK2-NEXT: [[CONV3:%.*]] = sext i16 [[TMP40]] to i32 +// CHECK2-NEXT: [[TMP41:%.*]] = load i32, i32* [[B]], align 4 +// CHECK2-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], [[TMP41]] +// CHECK2-NEXT: [[TMP42:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK2-NEXT: call void @llvm.stackrestore(i8* [[TMP42]]) // CHECK2-NEXT: ret i32 [[ADD4]] // // @@ -3219,7 +3157,7 @@ // CHECK2-NEXT: store i8* null, i8** [[TMP26]], align 8 // CHECK2-NEXT: [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK2-NEXT: [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK2-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l224.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) +// CHECK2-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l224.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.14, i32 0, i32 0), i64* getelementptr 
inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.15, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) // CHECK2-NEXT: [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0 // CHECK2-NEXT: br i1 [[TMP30]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK2: omp_offload.failed: @@ -3288,7 +3226,7 @@ // CHECK2-NEXT: store i8* null, i8** [[TMP19]], align 8 // CHECK2-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK2-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK2-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l207.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.15, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) +// CHECK2-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l207.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.17, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.18, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) // CHECK2-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0 // CHECK2-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK2: omp_offload.failed: @@ -3327,11 +3265,11 @@ // CHECK2-NEXT: [[CONV3:%.*]] = bitcast i64* [[B_CASTED]] to i32* // CHECK2-NEXT: store i32 [[TMP4]], i32* [[CONV3]], align 4 // CHECK2-NEXT: [[TMP5:%.*]] = load i64, i64* [[B_CASTED]], align 8 -// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], i16* [[TMP3]]) +// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], i16* [[TMP3]]) // CHECK2-NEXT: ret void // // -// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..9 +// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..10 // CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i64 noundef [[B:%.*]], i64 noundef [[VLA:%.*]], i64 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR3]] { // CHECK2-NEXT: entry: // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -3450,11 +3388,11 @@ // CHECK2-NEXT: [[CONV5:%.*]] = bitcast i64* [[AAA_CASTED]] to i8* // CHECK2-NEXT: store i8 [[TMP5]], i8* [[CONV5]], align 1 // CHECK2-NEXT: [[TMP6:%.*]] = load i64, i64* [[AAA_CASTED]], align 8 -// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, [10 x i32]*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], [10 x i32]* [[TMP0]]) +// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, [10 x i32]*)* @.omp_outlined..13 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], [10 x i32]* [[TMP0]]) // CHECK2-NEXT: ret void // // -// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..13 // CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], i64 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] { // CHECK2-NEXT: entry: // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -3500,11 +3438,11 @@ // CHECK2-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16* // CHECK2-NEXT: store i16 [[TMP3]], i16* [[CONV3]], align 2 // CHECK2-NEXT: [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8 -// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]]) +// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]]) // CHECK2-NEXT: ret void // // -// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..14 +// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..16 // CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] { // CHECK2-NEXT: entry: // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -3631,7 +3569,6 @@ // CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS14:%.*]] = alloca [10 x i8*], align 4 // CHECK3-NEXT: [[DOTOFFLOAD_PTRS15:%.*]] = alloca [10 x i8*], align 4 // CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS16:%.*]] = alloca [10 x i8*], align 4 -// CHECK3-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [10 x i64], align 4 // CHECK3-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]]) // CHECK3-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 // CHECK3-NEXT: store i32 0, i32* [[A]], align 4 @@ -3792,106 +3729,87 @@ // CHECK3-NEXT: [[TMP89:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 0 // CHECK3-NEXT: [[TMP90:%.*]] = bitcast i8** [[TMP89]] to i32* // CHECK3-NEXT: store i32 [[TMP78]], i32* [[TMP90]], align 4 -// CHECK3-NEXT: [[TMP91:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK3-NEXT: store i64 4, i64* [[TMP91]], align 4 -// CHECK3-NEXT: [[TMP92:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 0 -// CHECK3-NEXT: store i8* null, i8** [[TMP92]], align 4 -// CHECK3-NEXT: [[TMP93:%.*]] = getelementptr inbounds [10 x i8*], 
[10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 1 -// CHECK3-NEXT: [[TMP94:%.*]] = bitcast i8** [[TMP93]] to [10 x float]** -// CHECK3-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP94]], align 4 -// CHECK3-NEXT: [[TMP95:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 1 -// CHECK3-NEXT: [[TMP96:%.*]] = bitcast i8** [[TMP95]] to [10 x float]** -// CHECK3-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP96]], align 4 -// CHECK3-NEXT: [[TMP97:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK3-NEXT: store i64 40, i64* [[TMP97]], align 4 -// CHECK3-NEXT: [[TMP98:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 1 -// CHECK3-NEXT: store i8* null, i8** [[TMP98]], align 4 -// CHECK3-NEXT: [[TMP99:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 2 +// CHECK3-NEXT: [[TMP91:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 0 +// CHECK3-NEXT: store i8* null, i8** [[TMP91]], align 4 +// CHECK3-NEXT: [[TMP92:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 1 +// CHECK3-NEXT: [[TMP93:%.*]] = bitcast i8** [[TMP92]] to [10 x float]** +// CHECK3-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP93]], align 4 +// CHECK3-NEXT: [[TMP94:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 1 +// CHECK3-NEXT: [[TMP95:%.*]] = bitcast i8** [[TMP94]] to [10 x float]** +// CHECK3-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP95]], align 4 +// CHECK3-NEXT: [[TMP96:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 1 +// CHECK3-NEXT: store i8* null, i8** [[TMP96]], align 4 +// CHECK3-NEXT: [[TMP97:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 2 +// CHECK3-NEXT: [[TMP98:%.*]] = bitcast i8** [[TMP97]] to i32* +// CHECK3-NEXT: store i32 [[TMP1]], i32* [[TMP98]], align 4 +// CHECK3-NEXT: [[TMP99:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 2 // CHECK3-NEXT: [[TMP100:%.*]] = bitcast i8** [[TMP99]] to i32* // CHECK3-NEXT: store i32 [[TMP1]], i32* [[TMP100]], align 4 -// CHECK3-NEXT: [[TMP101:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 2 -// CHECK3-NEXT: [[TMP102:%.*]] = bitcast i8** [[TMP101]] to i32* -// CHECK3-NEXT: store i32 [[TMP1]], i32* [[TMP102]], align 4 -// CHECK3-NEXT: [[TMP103:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK3-NEXT: store i64 4, i64* [[TMP103]], align 4 -// CHECK3-NEXT: [[TMP104:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 2 -// CHECK3-NEXT: store i8* null, i8** [[TMP104]], align 4 -// CHECK3-NEXT: [[TMP105:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 3 -// CHECK3-NEXT: [[TMP106:%.*]] = bitcast i8** [[TMP105]] to float** -// CHECK3-NEXT: store float* [[VLA]], float** [[TMP106]], align 4 -// CHECK3-NEXT: [[TMP107:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 3 -// CHECK3-NEXT: [[TMP108:%.*]] = bitcast i8** [[TMP107]] to float** -// CHECK3-NEXT: store float* [[VLA]], float** [[TMP108]], align 4 -// CHECK3-NEXT: [[TMP109:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK3-NEXT: 
store i64 [[TMP83]], i64* [[TMP109]], align 4 -// CHECK3-NEXT: [[TMP110:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 3 -// CHECK3-NEXT: store i8* null, i8** [[TMP110]], align 4 -// CHECK3-NEXT: [[TMP111:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 4 -// CHECK3-NEXT: [[TMP112:%.*]] = bitcast i8** [[TMP111]] to [5 x [10 x double]]** -// CHECK3-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP112]], align 4 -// CHECK3-NEXT: [[TMP113:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 4 -// CHECK3-NEXT: [[TMP114:%.*]] = bitcast i8** [[TMP113]] to [5 x [10 x double]]** -// CHECK3-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP114]], align 4 -// CHECK3-NEXT: [[TMP115:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK3-NEXT: store i64 400, i64* [[TMP115]], align 4 -// CHECK3-NEXT: [[TMP116:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 4 +// CHECK3-NEXT: [[TMP101:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 2 +// CHECK3-NEXT: store i8* null, i8** [[TMP101]], align 4 +// CHECK3-NEXT: [[TMP102:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 3 +// CHECK3-NEXT: [[TMP103:%.*]] = bitcast i8** [[TMP102]] to float** +// CHECK3-NEXT: store float* [[VLA]], float** [[TMP103]], align 4 +// CHECK3-NEXT: [[TMP104:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 3 +// CHECK3-NEXT: [[TMP105:%.*]] = bitcast i8** [[TMP104]] to float** +// CHECK3-NEXT: store float* [[VLA]], float** [[TMP105]], align 4 +// CHECK3-NEXT: store i64 [[TMP83]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_sizes.8, i32 0, i32 3), align 4 +// CHECK3-NEXT: [[TMP106:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 3 +// CHECK3-NEXT: store i8* null, i8** [[TMP106]], align 4 +// CHECK3-NEXT: [[TMP107:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 4 +// CHECK3-NEXT: [[TMP108:%.*]] = bitcast i8** [[TMP107]] to [5 x [10 x double]]** +// CHECK3-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP108]], align 4 +// CHECK3-NEXT: [[TMP109:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 4 +// CHECK3-NEXT: [[TMP110:%.*]] = bitcast i8** [[TMP109]] to [5 x [10 x double]]** +// CHECK3-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP110]], align 4 +// CHECK3-NEXT: [[TMP111:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 4 +// CHECK3-NEXT: store i8* null, i8** [[TMP111]], align 4 +// CHECK3-NEXT: [[TMP112:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 5 +// CHECK3-NEXT: [[TMP113:%.*]] = bitcast i8** [[TMP112]] to i32* +// CHECK3-NEXT: store i32 5, i32* [[TMP113]], align 4 +// CHECK3-NEXT: [[TMP114:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 5 +// CHECK3-NEXT: [[TMP115:%.*]] = bitcast i8** [[TMP114]] to i32* +// CHECK3-NEXT: store i32 5, i32* [[TMP115]], align 4 +// CHECK3-NEXT: [[TMP116:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 5 // CHECK3-NEXT: store i8* null, i8** [[TMP116]], align 4 -// CHECK3-NEXT: 
[[TMP117:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 5 +// CHECK3-NEXT: [[TMP117:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 6 // CHECK3-NEXT: [[TMP118:%.*]] = bitcast i8** [[TMP117]] to i32* -// CHECK3-NEXT: store i32 5, i32* [[TMP118]], align 4 -// CHECK3-NEXT: [[TMP119:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 5 +// CHECK3-NEXT: store i32 [[TMP3]], i32* [[TMP118]], align 4 +// CHECK3-NEXT: [[TMP119:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 6 // CHECK3-NEXT: [[TMP120:%.*]] = bitcast i8** [[TMP119]] to i32* -// CHECK3-NEXT: store i32 5, i32* [[TMP120]], align 4 -// CHECK3-NEXT: [[TMP121:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5 -// CHECK3-NEXT: store i64 4, i64* [[TMP121]], align 4 -// CHECK3-NEXT: [[TMP122:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 5 -// CHECK3-NEXT: store i8* null, i8** [[TMP122]], align 4 -// CHECK3-NEXT: [[TMP123:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 6 -// CHECK3-NEXT: [[TMP124:%.*]] = bitcast i8** [[TMP123]] to i32* -// CHECK3-NEXT: store i32 [[TMP3]], i32* [[TMP124]], align 4 -// CHECK3-NEXT: [[TMP125:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 6 -// CHECK3-NEXT: [[TMP126:%.*]] = bitcast i8** [[TMP125]] to i32* -// CHECK3-NEXT: store i32 [[TMP3]], i32* [[TMP126]], align 4 -// CHECK3-NEXT: [[TMP127:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6 -// CHECK3-NEXT: store i64 4, i64* [[TMP127]], align 4 -// CHECK3-NEXT: [[TMP128:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 6 -// CHECK3-NEXT: store i8* null, i8** [[TMP128]], align 4 -// CHECK3-NEXT: [[TMP129:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 7 -// CHECK3-NEXT: [[TMP130:%.*]] = bitcast i8** [[TMP129]] to double** -// CHECK3-NEXT: store double* [[VLA1]], double** [[TMP130]], align 4 -// CHECK3-NEXT: [[TMP131:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 7 -// CHECK3-NEXT: [[TMP132:%.*]] = bitcast i8** [[TMP131]] to double** -// CHECK3-NEXT: store double* [[VLA1]], double** [[TMP132]], align 4 -// CHECK3-NEXT: [[TMP133:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7 -// CHECK3-NEXT: store i64 [[TMP86]], i64* [[TMP133]], align 4 -// CHECK3-NEXT: [[TMP134:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 7 -// CHECK3-NEXT: store i8* null, i8** [[TMP134]], align 4 -// CHECK3-NEXT: [[TMP135:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 8 -// CHECK3-NEXT: [[TMP136:%.*]] = bitcast i8** [[TMP135]] to %struct.TT** -// CHECK3-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP136]], align 4 -// CHECK3-NEXT: [[TMP137:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 8 -// CHECK3-NEXT: [[TMP138:%.*]] = bitcast i8** [[TMP137]] to %struct.TT** -// CHECK3-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP138]], align 4 -// CHECK3-NEXT: [[TMP139:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 8 -// CHECK3-NEXT: store i64 12, i64* [[TMP139]], align 4 -// CHECK3-NEXT: 
[[TMP140:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 8 -// CHECK3-NEXT: store i8* null, i8** [[TMP140]], align 4 -// CHECK3-NEXT: [[TMP141:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 9 -// CHECK3-NEXT: [[TMP142:%.*]] = bitcast i8** [[TMP141]] to i32* -// CHECK3-NEXT: store i32 [[TMP80]], i32* [[TMP142]], align 4 -// CHECK3-NEXT: [[TMP143:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 9 -// CHECK3-NEXT: [[TMP144:%.*]] = bitcast i8** [[TMP143]] to i32* -// CHECK3-NEXT: store i32 [[TMP80]], i32* [[TMP144]], align 4 -// CHECK3-NEXT: [[TMP145:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 9 -// CHECK3-NEXT: store i64 4, i64* [[TMP145]], align 4 -// CHECK3-NEXT: [[TMP146:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 9 -// CHECK3-NEXT: store i8* null, i8** [[TMP146]], align 4 -// CHECK3-NEXT: [[TMP147:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP148:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP149:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP150:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l170.region_id, i32 10, i8** [[TMP147]], i8** [[TMP148]], i64* [[TMP149]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_maptypes.8, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) -// CHECK3-NEXT: [[TMP151:%.*]] = icmp ne i32 [[TMP150]], 0 -// CHECK3-NEXT: br i1 [[TMP151]], label [[OMP_OFFLOAD_FAILED17:%.*]], label [[OMP_OFFLOAD_CONT18:%.*]] +// CHECK3-NEXT: store i32 [[TMP3]], i32* [[TMP120]], align 4 +// CHECK3-NEXT: [[TMP121:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 6 +// CHECK3-NEXT: store i8* null, i8** [[TMP121]], align 4 +// CHECK3-NEXT: [[TMP122:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 7 +// CHECK3-NEXT: [[TMP123:%.*]] = bitcast i8** [[TMP122]] to double** +// CHECK3-NEXT: store double* [[VLA1]], double** [[TMP123]], align 4 +// CHECK3-NEXT: [[TMP124:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 7 +// CHECK3-NEXT: [[TMP125:%.*]] = bitcast i8** [[TMP124]] to double** +// CHECK3-NEXT: store double* [[VLA1]], double** [[TMP125]], align 4 +// CHECK3-NEXT: store i64 [[TMP86]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_sizes.8, i32 0, i32 7), align 4 +// CHECK3-NEXT: [[TMP126:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 7 +// CHECK3-NEXT: store i8* null, i8** [[TMP126]], align 4 +// CHECK3-NEXT: [[TMP127:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 8 +// CHECK3-NEXT: [[TMP128:%.*]] = bitcast i8** [[TMP127]] to %struct.TT** +// CHECK3-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP128]], align 4 +// CHECK3-NEXT: [[TMP129:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 8 +// CHECK3-NEXT: [[TMP130:%.*]] = bitcast i8** [[TMP129]] to %struct.TT** +// CHECK3-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP130]], align 4 +// CHECK3-NEXT: [[TMP131:%.*]] = getelementptr inbounds [10 x 
i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 8 +// CHECK3-NEXT: store i8* null, i8** [[TMP131]], align 4 +// CHECK3-NEXT: [[TMP132:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 9 +// CHECK3-NEXT: [[TMP133:%.*]] = bitcast i8** [[TMP132]] to i32* +// CHECK3-NEXT: store i32 [[TMP80]], i32* [[TMP133]], align 4 +// CHECK3-NEXT: [[TMP134:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 9 +// CHECK3-NEXT: [[TMP135:%.*]] = bitcast i8** [[TMP134]] to i32* +// CHECK3-NEXT: store i32 [[TMP80]], i32* [[TMP135]], align 4 +// CHECK3-NEXT: [[TMP136:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 9 +// CHECK3-NEXT: store i8* null, i8** [[TMP136]], align 4 +// CHECK3-NEXT: [[TMP137:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP138:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP139:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l170.region_id, i32 10, i8** [[TMP137]], i8** [[TMP138]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) +// CHECK3-NEXT: [[TMP140:%.*]] = icmp ne i32 [[TMP139]], 0 +// CHECK3-NEXT: br i1 [[TMP140]], label [[OMP_OFFLOAD_FAILED17:%.*]], label [[OMP_OFFLOAD_CONT18:%.*]] // CHECK3: omp_offload.failed17: // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l170(i32 [[TMP78]], [10 x float]* [[B]], i32 [[TMP1]], float* [[VLA]], [5 x [10 x double]]* [[C]], i32 5, i32 [[TMP3]], double* [[VLA1]], %struct.TT* [[D]], i32 [[TMP80]]) #[[ATTR4]] // CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT18]] @@ -3901,10 +3819,10 @@ // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l170(i32 [[TMP78]], [10 x float]* [[B]], i32 [[TMP1]], float* [[VLA]], [5 x [10 x double]]* [[C]], i32 5, i32 [[TMP3]], double* [[VLA1]], %struct.TT* [[D]], i32 [[TMP80]]) #[[ATTR4]] // CHECK3-NEXT: br label [[OMP_IF_END20]] // CHECK3: omp_if.end20: -// CHECK3-NEXT: [[TMP152:%.*]] = load i32, i32* [[A]], align 4 -// CHECK3-NEXT: [[TMP153:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK3-NEXT: call void @llvm.stackrestore(i8* [[TMP153]]) -// CHECK3-NEXT: ret i32 [[TMP152]] +// CHECK3-NEXT: [[TMP141:%.*]] = load i32, i32* [[A]], align 4 +// CHECK3-NEXT: [[TMP142:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK3-NEXT: call void @llvm.stackrestore(i8* [[TMP142]]) +// CHECK3-NEXT: ret i32 [[TMP141]] // // // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l103 @@ -4655,7 +4573,6 @@ // CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 4 // CHECK3-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 4 // CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 4 -// CHECK3-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 4 // CHECK3-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 // CHECK3-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 // CHECK3-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 @@ -4685,56 +4602,46 @@ // CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // 
CHECK3-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to double** // CHECK3-NEXT: store double* [[A]], double** [[TMP13]], align 4 -// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK3-NEXT: store i64 8, i64* [[TMP14]], align 4 -// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK3-NEXT: store i8* null, i8** [[TMP15]], align 4 -// CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK3-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32* -// CHECK3-NEXT: store i32 [[TMP5]], i32* [[TMP17]], align 4 -// CHECK3-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK3-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32* -// CHECK3-NEXT: store i32 [[TMP5]], i32* [[TMP19]], align 4 -// CHECK3-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK3-NEXT: store i64 4, i64* [[TMP20]], align 4 -// CHECK3-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK3-NEXT: store i8* null, i8** [[TMP21]], align 4 -// CHECK3-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK3-NEXT: store i8* null, i8** [[TMP14]], align 4 +// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK3-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i32* +// CHECK3-NEXT: store i32 [[TMP5]], i32* [[TMP16]], align 4 +// CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK3-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32* +// CHECK3-NEXT: store i32 [[TMP5]], i32* [[TMP18]], align 4 +// CHECK3-NEXT: [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK3-NEXT: store i8* null, i8** [[TMP19]], align 4 +// CHECK3-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK3-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32* +// CHECK3-NEXT: store i32 2, i32* [[TMP21]], align 4 +// CHECK3-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK3-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i32* // CHECK3-NEXT: store i32 2, i32* [[TMP23]], align 4 -// CHECK3-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK3-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i32* -// CHECK3-NEXT: store i32 2, i32* [[TMP25]], align 4 -// CHECK3-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK3-NEXT: store i64 4, i64* [[TMP26]], align 4 -// CHECK3-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK3-NEXT: store i8* null, i8** [[TMP27]], align 4 -// CHECK3-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK3-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i32* -// CHECK3-NEXT: store i32 [[TMP1]], i32* [[TMP29]], align 4 -// CHECK3-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 
x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK3-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i32* -// CHECK3-NEXT: store i32 [[TMP1]], i32* [[TMP31]], align 4 -// CHECK3-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK3-NEXT: store i64 4, i64* [[TMP32]], align 4 -// CHECK3-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 -// CHECK3-NEXT: store i8* null, i8** [[TMP33]], align 4 -// CHECK3-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK3-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i16** -// CHECK3-NEXT: store i16* [[VLA]], i16** [[TMP35]], align 4 -// CHECK3-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK3-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i16** -// CHECK3-NEXT: store i16* [[VLA]], i16** [[TMP37]], align 4 -// CHECK3-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK3-NEXT: store i64 [[TMP9]], i64* [[TMP38]], align 4 -// CHECK3-NEXT: [[TMP39:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 -// CHECK3-NEXT: store i8* null, i8** [[TMP39]], align 4 -// CHECK3-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l242.region_id, i32 5, i8** [[TMP40]], i8** [[TMP41]], i64* [[TMP42]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) -// CHECK3-NEXT: [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0 -// CHECK3-NEXT: br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK3-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK3-NEXT: store i8* null, i8** [[TMP24]], align 4 +// CHECK3-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK3-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i32* +// CHECK3-NEXT: store i32 [[TMP1]], i32* [[TMP26]], align 4 +// CHECK3-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK3-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i32* +// CHECK3-NEXT: store i32 [[TMP1]], i32* [[TMP28]], align 4 +// CHECK3-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 +// CHECK3-NEXT: store i8* null, i8** [[TMP29]], align 4 +// CHECK3-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK3-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i16** +// CHECK3-NEXT: store i16* [[VLA]], i16** [[TMP31]], align 4 +// CHECK3-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK3-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i16** +// CHECK3-NEXT: store i16* [[VLA]], i16** [[TMP33]], align 4 +// CHECK3-NEXT: store i64 [[TMP9]], i64* 
getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.11, i32 0, i32 4), align 4 +// CHECK3-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 +// CHECK3-NEXT: store i8* null, i8** [[TMP34]], align 4 +// CHECK3-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP37:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l242.region_id, i32 5, i8** [[TMP35]], i8** [[TMP36]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.11, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) +// CHECK3-NEXT: [[TMP38:%.*]] = icmp ne i32 [[TMP37]], 0 +// CHECK3-NEXT: br i1 [[TMP38]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK3: omp_offload.failed: // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l242(%struct.S1* [[THIS1]], i32 [[TMP5]], i32 2, i32 [[TMP1]], i16* [[VLA]]) #[[ATTR4]] // CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -4744,15 +4651,15 @@ // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l242(%struct.S1* [[THIS1]], i32 [[TMP5]], i32 2, i32 [[TMP1]], i16* [[VLA]]) #[[ATTR4]] // CHECK3-NEXT: br label [[OMP_IF_END]] // CHECK3: omp_if.end: -// CHECK3-NEXT: [[TMP45:%.*]] = mul nsw i32 1, [[TMP1]] -// CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP45]] +// CHECK3-NEXT: [[TMP39:%.*]] = mul nsw i32 1, [[TMP1]] +// CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP39]] // CHECK3-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1 -// CHECK3-NEXT: [[TMP46:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2 -// CHECK3-NEXT: [[CONV:%.*]] = sext i16 [[TMP46]] to i32 -// CHECK3-NEXT: [[TMP47:%.*]] = load i32, i32* [[B]], align 4 -// CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[TMP47]] -// CHECK3-NEXT: [[TMP48:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK3-NEXT: call void @llvm.stackrestore(i8* [[TMP48]]) +// CHECK3-NEXT: [[TMP40:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2 +// CHECK3-NEXT: [[CONV:%.*]] = sext i16 [[TMP40]] to i32 +// CHECK3-NEXT: [[TMP41:%.*]] = load i32, i32* [[B]], align 4 +// CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[TMP41]] +// CHECK3-NEXT: [[TMP42:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK3-NEXT: call void @llvm.stackrestore(i8* [[TMP42]]) // CHECK3-NEXT: ret i32 [[ADD3]] // // @@ -4823,7 +4730,7 @@ // CHECK3-NEXT: store i8* null, i8** [[TMP26]], align 4 // CHECK3-NEXT: [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK3-NEXT: [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l224.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) +// CHECK3-NEXT: [[TMP29:%.*]] = 
call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l224.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.14, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.15, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) // CHECK3-NEXT: [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0 // CHECK3-NEXT: br i1 [[TMP30]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK3: omp_offload.failed: @@ -4891,7 +4798,7 @@ // CHECK3-NEXT: store i8* null, i8** [[TMP19]], align 4 // CHECK3-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK3-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l207.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.15, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) +// CHECK3-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l207.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.17, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.18, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) // CHECK3-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0 // CHECK3-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK3: omp_offload.failed: @@ -4928,11 +4835,11 @@ // CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[B_ADDR]], align 4 // CHECK3-NEXT: store i32 [[TMP4]], i32* [[B_CASTED]], align 4 // CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[B_CASTED]], align 4 -// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], i16* [[TMP3]]) +// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], i16* [[TMP3]]) // CHECK3-NEXT: ret void // // -// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..9 +// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..10 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i32 noundef [[B:%.*]], i32 noundef [[VLA:%.*]], i32 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR3]] { // CHECK3-NEXT: entry: // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -5048,11 +4955,11 @@ // CHECK3-NEXT: [[CONV3:%.*]] = bitcast i32* [[AAA_CASTED]] to i8* // CHECK3-NEXT: store i8 [[TMP5]], i8* [[CONV3]], align 1 // CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[AAA_CASTED]], align 4 -// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, [10 x i32]*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], [10 x i32]* [[TMP0]]) +// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, [10 x i32]*)* @.omp_outlined..13 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], [10 x i32]* [[TMP0]]) // CHECK3-NEXT: ret void // // -// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..13 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], i32 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] { // CHECK3-NEXT: entry: // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -5095,11 +5002,11 @@ // CHECK3-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16* // CHECK3-NEXT: store i16 [[TMP3]], i16* [[CONV1]], align 2 // CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4 -// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]]) +// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]]) // CHECK3-NEXT: ret void // // -// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..14 +// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..16 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] { // CHECK3-NEXT: entry: // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -5225,7 +5132,6 @@ // CHECK4-NEXT: [[DOTOFFLOAD_BASEPTRS14:%.*]] = alloca [10 x i8*], align 4 // CHECK4-NEXT: [[DOTOFFLOAD_PTRS15:%.*]] = alloca [10 x i8*], align 4 // CHECK4-NEXT: [[DOTOFFLOAD_MAPPERS16:%.*]] = alloca [10 x i8*], align 4 -// CHECK4-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [10 x i64], align 4 // CHECK4-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]]) // CHECK4-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 // CHECK4-NEXT: store i32 0, i32* [[A]], align 4 @@ -5386,106 +5292,87 @@ // CHECK4-NEXT: [[TMP89:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 0 // CHECK4-NEXT: [[TMP90:%.*]] = bitcast i8** [[TMP89]] to i32* // CHECK4-NEXT: store i32 [[TMP78]], i32* [[TMP90]], align 4 -// CHECK4-NEXT: [[TMP91:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK4-NEXT: store i64 4, i64* [[TMP91]], align 4 -// CHECK4-NEXT: [[TMP92:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 0 -// CHECK4-NEXT: store i8* null, i8** [[TMP92]], align 4 -// CHECK4-NEXT: [[TMP93:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 1 -// CHECK4-NEXT: [[TMP94:%.*]] = bitcast i8** [[TMP93]] to [10 x float]** -// CHECK4-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP94]], align 4 -// CHECK4-NEXT: [[TMP95:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 1 -// CHECK4-NEXT: [[TMP96:%.*]] = bitcast i8** [[TMP95]] to [10 x float]** -// CHECK4-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP96]], align 4 -// CHECK4-NEXT: [[TMP97:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK4-NEXT: store i64 40, i64* [[TMP97]], align 4 -// CHECK4-NEXT: [[TMP98:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 1 -// CHECK4-NEXT: store i8* null, i8** [[TMP98]], align 4 -// CHECK4-NEXT: [[TMP99:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 2 +// CHECK4-NEXT: [[TMP91:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 0 +// CHECK4-NEXT: store i8* null, i8** [[TMP91]], align 4 +// CHECK4-NEXT: [[TMP92:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 1 +// CHECK4-NEXT: [[TMP93:%.*]] = bitcast i8** [[TMP92]] to [10 x float]** +// CHECK4-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP93]], align 4 +// CHECK4-NEXT: [[TMP94:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 1 +// CHECK4-NEXT: [[TMP95:%.*]] = bitcast i8** [[TMP94]] to [10 x float]** +// CHECK4-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP95]], align 4 +// 
CHECK4-NEXT: [[TMP96:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 1 +// CHECK4-NEXT: store i8* null, i8** [[TMP96]], align 4 +// CHECK4-NEXT: [[TMP97:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 2 +// CHECK4-NEXT: [[TMP98:%.*]] = bitcast i8** [[TMP97]] to i32* +// CHECK4-NEXT: store i32 [[TMP1]], i32* [[TMP98]], align 4 +// CHECK4-NEXT: [[TMP99:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 2 // CHECK4-NEXT: [[TMP100:%.*]] = bitcast i8** [[TMP99]] to i32* // CHECK4-NEXT: store i32 [[TMP1]], i32* [[TMP100]], align 4 -// CHECK4-NEXT: [[TMP101:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 2 -// CHECK4-NEXT: [[TMP102:%.*]] = bitcast i8** [[TMP101]] to i32* -// CHECK4-NEXT: store i32 [[TMP1]], i32* [[TMP102]], align 4 -// CHECK4-NEXT: [[TMP103:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK4-NEXT: store i64 4, i64* [[TMP103]], align 4 -// CHECK4-NEXT: [[TMP104:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 2 -// CHECK4-NEXT: store i8* null, i8** [[TMP104]], align 4 -// CHECK4-NEXT: [[TMP105:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 3 -// CHECK4-NEXT: [[TMP106:%.*]] = bitcast i8** [[TMP105]] to float** -// CHECK4-NEXT: store float* [[VLA]], float** [[TMP106]], align 4 -// CHECK4-NEXT: [[TMP107:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 3 -// CHECK4-NEXT: [[TMP108:%.*]] = bitcast i8** [[TMP107]] to float** -// CHECK4-NEXT: store float* [[VLA]], float** [[TMP108]], align 4 -// CHECK4-NEXT: [[TMP109:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK4-NEXT: store i64 [[TMP83]], i64* [[TMP109]], align 4 -// CHECK4-NEXT: [[TMP110:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 3 -// CHECK4-NEXT: store i8* null, i8** [[TMP110]], align 4 -// CHECK4-NEXT: [[TMP111:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 4 -// CHECK4-NEXT: [[TMP112:%.*]] = bitcast i8** [[TMP111]] to [5 x [10 x double]]** -// CHECK4-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP112]], align 4 -// CHECK4-NEXT: [[TMP113:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 4 -// CHECK4-NEXT: [[TMP114:%.*]] = bitcast i8** [[TMP113]] to [5 x [10 x double]]** -// CHECK4-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP114]], align 4 -// CHECK4-NEXT: [[TMP115:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK4-NEXT: store i64 400, i64* [[TMP115]], align 4 -// CHECK4-NEXT: [[TMP116:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 4 +// CHECK4-NEXT: [[TMP101:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 2 +// CHECK4-NEXT: store i8* null, i8** [[TMP101]], align 4 +// CHECK4-NEXT: [[TMP102:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 3 +// CHECK4-NEXT: [[TMP103:%.*]] = bitcast i8** [[TMP102]] to float** +// CHECK4-NEXT: store float* [[VLA]], float** [[TMP103]], align 4 +// CHECK4-NEXT: [[TMP104:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 3 
+// CHECK4-NEXT: [[TMP105:%.*]] = bitcast i8** [[TMP104]] to float** +// CHECK4-NEXT: store float* [[VLA]], float** [[TMP105]], align 4 +// CHECK4-NEXT: store i64 [[TMP83]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_sizes.8, i32 0, i32 3), align 4 +// CHECK4-NEXT: [[TMP106:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 3 +// CHECK4-NEXT: store i8* null, i8** [[TMP106]], align 4 +// CHECK4-NEXT: [[TMP107:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 4 +// CHECK4-NEXT: [[TMP108:%.*]] = bitcast i8** [[TMP107]] to [5 x [10 x double]]** +// CHECK4-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP108]], align 4 +// CHECK4-NEXT: [[TMP109:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 4 +// CHECK4-NEXT: [[TMP110:%.*]] = bitcast i8** [[TMP109]] to [5 x [10 x double]]** +// CHECK4-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP110]], align 4 +// CHECK4-NEXT: [[TMP111:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 4 +// CHECK4-NEXT: store i8* null, i8** [[TMP111]], align 4 +// CHECK4-NEXT: [[TMP112:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 5 +// CHECK4-NEXT: [[TMP113:%.*]] = bitcast i8** [[TMP112]] to i32* +// CHECK4-NEXT: store i32 5, i32* [[TMP113]], align 4 +// CHECK4-NEXT: [[TMP114:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 5 +// CHECK4-NEXT: [[TMP115:%.*]] = bitcast i8** [[TMP114]] to i32* +// CHECK4-NEXT: store i32 5, i32* [[TMP115]], align 4 +// CHECK4-NEXT: [[TMP116:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 5 // CHECK4-NEXT: store i8* null, i8** [[TMP116]], align 4 -// CHECK4-NEXT: [[TMP117:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 5 +// CHECK4-NEXT: [[TMP117:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 6 // CHECK4-NEXT: [[TMP118:%.*]] = bitcast i8** [[TMP117]] to i32* -// CHECK4-NEXT: store i32 5, i32* [[TMP118]], align 4 -// CHECK4-NEXT: [[TMP119:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 5 +// CHECK4-NEXT: store i32 [[TMP3]], i32* [[TMP118]], align 4 +// CHECK4-NEXT: [[TMP119:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 6 // CHECK4-NEXT: [[TMP120:%.*]] = bitcast i8** [[TMP119]] to i32* -// CHECK4-NEXT: store i32 5, i32* [[TMP120]], align 4 -// CHECK4-NEXT: [[TMP121:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5 -// CHECK4-NEXT: store i64 4, i64* [[TMP121]], align 4 -// CHECK4-NEXT: [[TMP122:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 5 -// CHECK4-NEXT: store i8* null, i8** [[TMP122]], align 4 -// CHECK4-NEXT: [[TMP123:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 6 -// CHECK4-NEXT: [[TMP124:%.*]] = bitcast i8** [[TMP123]] to i32* -// CHECK4-NEXT: store i32 [[TMP3]], i32* [[TMP124]], align 4 -// CHECK4-NEXT: [[TMP125:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 6 -// CHECK4-NEXT: [[TMP126:%.*]] = bitcast i8** [[TMP125]] to i32* -// CHECK4-NEXT: store i32 [[TMP3]], i32* [[TMP126]], align 4 -// CHECK4-NEXT: [[TMP127:%.*]] = getelementptr inbounds [10 
x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6 -// CHECK4-NEXT: store i64 4, i64* [[TMP127]], align 4 -// CHECK4-NEXT: [[TMP128:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 6 -// CHECK4-NEXT: store i8* null, i8** [[TMP128]], align 4 -// CHECK4-NEXT: [[TMP129:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 7 -// CHECK4-NEXT: [[TMP130:%.*]] = bitcast i8** [[TMP129]] to double** -// CHECK4-NEXT: store double* [[VLA1]], double** [[TMP130]], align 4 -// CHECK4-NEXT: [[TMP131:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 7 -// CHECK4-NEXT: [[TMP132:%.*]] = bitcast i8** [[TMP131]] to double** -// CHECK4-NEXT: store double* [[VLA1]], double** [[TMP132]], align 4 -// CHECK4-NEXT: [[TMP133:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7 -// CHECK4-NEXT: store i64 [[TMP86]], i64* [[TMP133]], align 4 -// CHECK4-NEXT: [[TMP134:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 7 -// CHECK4-NEXT: store i8* null, i8** [[TMP134]], align 4 -// CHECK4-NEXT: [[TMP135:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 8 -// CHECK4-NEXT: [[TMP136:%.*]] = bitcast i8** [[TMP135]] to %struct.TT** -// CHECK4-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP136]], align 4 -// CHECK4-NEXT: [[TMP137:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 8 -// CHECK4-NEXT: [[TMP138:%.*]] = bitcast i8** [[TMP137]] to %struct.TT** -// CHECK4-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP138]], align 4 -// CHECK4-NEXT: [[TMP139:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 8 -// CHECK4-NEXT: store i64 12, i64* [[TMP139]], align 4 -// CHECK4-NEXT: [[TMP140:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 8 -// CHECK4-NEXT: store i8* null, i8** [[TMP140]], align 4 -// CHECK4-NEXT: [[TMP141:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 9 -// CHECK4-NEXT: [[TMP142:%.*]] = bitcast i8** [[TMP141]] to i32* -// CHECK4-NEXT: store i32 [[TMP80]], i32* [[TMP142]], align 4 -// CHECK4-NEXT: [[TMP143:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 9 -// CHECK4-NEXT: [[TMP144:%.*]] = bitcast i8** [[TMP143]] to i32* -// CHECK4-NEXT: store i32 [[TMP80]], i32* [[TMP144]], align 4 -// CHECK4-NEXT: [[TMP145:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 9 -// CHECK4-NEXT: store i64 4, i64* [[TMP145]], align 4 -// CHECK4-NEXT: [[TMP146:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 9 -// CHECK4-NEXT: store i8* null, i8** [[TMP146]], align 4 -// CHECK4-NEXT: [[TMP147:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 0 -// CHECK4-NEXT: [[TMP148:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 0 -// CHECK4-NEXT: [[TMP149:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK4-NEXT: [[TMP150:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l170.region_id, i32 10, i8** [[TMP147]], i8** [[TMP148]], i64* [[TMP149]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* 
@.offload_maptypes.8, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) -// CHECK4-NEXT: [[TMP151:%.*]] = icmp ne i32 [[TMP150]], 0 -// CHECK4-NEXT: br i1 [[TMP151]], label [[OMP_OFFLOAD_FAILED17:%.*]], label [[OMP_OFFLOAD_CONT18:%.*]] +// CHECK4-NEXT: store i32 [[TMP3]], i32* [[TMP120]], align 4 +// CHECK4-NEXT: [[TMP121:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 6 +// CHECK4-NEXT: store i8* null, i8** [[TMP121]], align 4 +// CHECK4-NEXT: [[TMP122:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 7 +// CHECK4-NEXT: [[TMP123:%.*]] = bitcast i8** [[TMP122]] to double** +// CHECK4-NEXT: store double* [[VLA1]], double** [[TMP123]], align 4 +// CHECK4-NEXT: [[TMP124:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 7 +// CHECK4-NEXT: [[TMP125:%.*]] = bitcast i8** [[TMP124]] to double** +// CHECK4-NEXT: store double* [[VLA1]], double** [[TMP125]], align 4 +// CHECK4-NEXT: store i64 [[TMP86]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_sizes.8, i32 0, i32 7), align 4 +// CHECK4-NEXT: [[TMP126:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 7 +// CHECK4-NEXT: store i8* null, i8** [[TMP126]], align 4 +// CHECK4-NEXT: [[TMP127:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 8 +// CHECK4-NEXT: [[TMP128:%.*]] = bitcast i8** [[TMP127]] to %struct.TT** +// CHECK4-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP128]], align 4 +// CHECK4-NEXT: [[TMP129:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 8 +// CHECK4-NEXT: [[TMP130:%.*]] = bitcast i8** [[TMP129]] to %struct.TT** +// CHECK4-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP130]], align 4 +// CHECK4-NEXT: [[TMP131:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 8 +// CHECK4-NEXT: store i8* null, i8** [[TMP131]], align 4 +// CHECK4-NEXT: [[TMP132:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 9 +// CHECK4-NEXT: [[TMP133:%.*]] = bitcast i8** [[TMP132]] to i32* +// CHECK4-NEXT: store i32 [[TMP80]], i32* [[TMP133]], align 4 +// CHECK4-NEXT: [[TMP134:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 9 +// CHECK4-NEXT: [[TMP135:%.*]] = bitcast i8** [[TMP134]] to i32* +// CHECK4-NEXT: store i32 [[TMP80]], i32* [[TMP135]], align 4 +// CHECK4-NEXT: [[TMP136:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 9 +// CHECK4-NEXT: store i8* null, i8** [[TMP136]], align 4 +// CHECK4-NEXT: [[TMP137:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 0 +// CHECK4-NEXT: [[TMP138:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 0 +// CHECK4-NEXT: [[TMP139:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l170.region_id, i32 10, i8** [[TMP137]], i8** [[TMP138]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) +// CHECK4-NEXT: [[TMP140:%.*]] = icmp ne i32 [[TMP139]], 0 +// CHECK4-NEXT: br i1 [[TMP140]], label [[OMP_OFFLOAD_FAILED17:%.*]], label [[OMP_OFFLOAD_CONT18:%.*]] // CHECK4: 
omp_offload.failed17: // CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l170(i32 [[TMP78]], [10 x float]* [[B]], i32 [[TMP1]], float* [[VLA]], [5 x [10 x double]]* [[C]], i32 5, i32 [[TMP3]], double* [[VLA1]], %struct.TT* [[D]], i32 [[TMP80]]) #[[ATTR4]] // CHECK4-NEXT: br label [[OMP_OFFLOAD_CONT18]] @@ -5495,10 +5382,10 @@ // CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l170(i32 [[TMP78]], [10 x float]* [[B]], i32 [[TMP1]], float* [[VLA]], [5 x [10 x double]]* [[C]], i32 5, i32 [[TMP3]], double* [[VLA1]], %struct.TT* [[D]], i32 [[TMP80]]) #[[ATTR4]] // CHECK4-NEXT: br label [[OMP_IF_END20]] // CHECK4: omp_if.end20: -// CHECK4-NEXT: [[TMP152:%.*]] = load i32, i32* [[A]], align 4 -// CHECK4-NEXT: [[TMP153:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK4-NEXT: call void @llvm.stackrestore(i8* [[TMP153]]) -// CHECK4-NEXT: ret i32 [[TMP152]] +// CHECK4-NEXT: [[TMP141:%.*]] = load i32, i32* [[A]], align 4 +// CHECK4-NEXT: [[TMP142:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK4-NEXT: call void @llvm.stackrestore(i8* [[TMP142]]) +// CHECK4-NEXT: ret i32 [[TMP141]] // // // CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l103 @@ -6249,7 +6136,6 @@ // CHECK4-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 4 // CHECK4-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 4 // CHECK4-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 4 -// CHECK4-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 4 // CHECK4-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 // CHECK4-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 // CHECK4-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 @@ -6279,56 +6165,46 @@ // CHECK4-NEXT: [[TMP12:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK4-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to double** // CHECK4-NEXT: store double* [[A]], double** [[TMP13]], align 4 -// CHECK4-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK4-NEXT: store i64 8, i64* [[TMP14]], align 4 -// CHECK4-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK4-NEXT: store i8* null, i8** [[TMP15]], align 4 -// CHECK4-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK4-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32* -// CHECK4-NEXT: store i32 [[TMP5]], i32* [[TMP17]], align 4 -// CHECK4-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK4-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32* -// CHECK4-NEXT: store i32 [[TMP5]], i32* [[TMP19]], align 4 -// CHECK4-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK4-NEXT: store i64 4, i64* [[TMP20]], align 4 -// CHECK4-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK4-NEXT: store i8* null, i8** [[TMP21]], align 4 -// CHECK4-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK4-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK4-NEXT: store i8* null, i8** [[TMP14]], align 4 +// CHECK4-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x 
i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK4-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i32* +// CHECK4-NEXT: store i32 [[TMP5]], i32* [[TMP16]], align 4 +// CHECK4-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK4-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32* +// CHECK4-NEXT: store i32 [[TMP5]], i32* [[TMP18]], align 4 +// CHECK4-NEXT: [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK4-NEXT: store i8* null, i8** [[TMP19]], align 4 +// CHECK4-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK4-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32* +// CHECK4-NEXT: store i32 2, i32* [[TMP21]], align 4 +// CHECK4-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK4-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i32* // CHECK4-NEXT: store i32 2, i32* [[TMP23]], align 4 -// CHECK4-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK4-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i32* -// CHECK4-NEXT: store i32 2, i32* [[TMP25]], align 4 -// CHECK4-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK4-NEXT: store i64 4, i64* [[TMP26]], align 4 -// CHECK4-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK4-NEXT: store i8* null, i8** [[TMP27]], align 4 -// CHECK4-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK4-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i32* -// CHECK4-NEXT: store i32 [[TMP1]], i32* [[TMP29]], align 4 -// CHECK4-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK4-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i32* -// CHECK4-NEXT: store i32 [[TMP1]], i32* [[TMP31]], align 4 -// CHECK4-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK4-NEXT: store i64 4, i64* [[TMP32]], align 4 -// CHECK4-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 -// CHECK4-NEXT: store i8* null, i8** [[TMP33]], align 4 -// CHECK4-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK4-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i16** -// CHECK4-NEXT: store i16* [[VLA]], i16** [[TMP35]], align 4 -// CHECK4-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK4-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i16** -// CHECK4-NEXT: store i16* [[VLA]], i16** [[TMP37]], align 4 -// CHECK4-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK4-NEXT: store i64 [[TMP9]], i64* [[TMP38]], align 4 -// CHECK4-NEXT: [[TMP39:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 -// CHECK4-NEXT: store i8* null, i8** [[TMP39]], align 4 -// CHECK4-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK4-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK4-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i64], [5 x 
i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK4-NEXT: [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l242.region_id, i32 5, i8** [[TMP40]], i8** [[TMP41]], i64* [[TMP42]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) -// CHECK4-NEXT: [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0 -// CHECK4-NEXT: br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK4-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK4-NEXT: store i8* null, i8** [[TMP24]], align 4 +// CHECK4-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK4-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i32* +// CHECK4-NEXT: store i32 [[TMP1]], i32* [[TMP26]], align 4 +// CHECK4-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK4-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i32* +// CHECK4-NEXT: store i32 [[TMP1]], i32* [[TMP28]], align 4 +// CHECK4-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 +// CHECK4-NEXT: store i8* null, i8** [[TMP29]], align 4 +// CHECK4-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK4-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i16** +// CHECK4-NEXT: store i16* [[VLA]], i16** [[TMP31]], align 4 +// CHECK4-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK4-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i16** +// CHECK4-NEXT: store i16* [[VLA]], i16** [[TMP33]], align 4 +// CHECK4-NEXT: store i64 [[TMP9]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.11, i32 0, i32 4), align 4 +// CHECK4-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 +// CHECK4-NEXT: store i8* null, i8** [[TMP34]], align 4 +// CHECK4-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK4-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK4-NEXT: [[TMP37:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l242.region_id, i32 5, i8** [[TMP35]], i8** [[TMP36]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.11, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) +// CHECK4-NEXT: [[TMP38:%.*]] = icmp ne i32 [[TMP37]], 0 +// CHECK4-NEXT: br i1 [[TMP38]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK4: omp_offload.failed: // CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l242(%struct.S1* [[THIS1]], i32 [[TMP5]], i32 2, i32 [[TMP1]], i16* [[VLA]]) #[[ATTR4]] // CHECK4-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -6338,15 +6214,15 @@ // CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l242(%struct.S1* [[THIS1]], i32 [[TMP5]], i32 2, i32 [[TMP1]], i16* [[VLA]]) #[[ATTR4]] // CHECK4-NEXT: br label [[OMP_IF_END]] // CHECK4: omp_if.end: -// CHECK4-NEXT: [[TMP45:%.*]] = mul nsw i32 1, [[TMP1]] -// 
CHECK4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP45]] +// CHECK4-NEXT: [[TMP39:%.*]] = mul nsw i32 1, [[TMP1]] +// CHECK4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP39]] // CHECK4-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1 -// CHECK4-NEXT: [[TMP46:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2 -// CHECK4-NEXT: [[CONV:%.*]] = sext i16 [[TMP46]] to i32 -// CHECK4-NEXT: [[TMP47:%.*]] = load i32, i32* [[B]], align 4 -// CHECK4-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[TMP47]] -// CHECK4-NEXT: [[TMP48:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK4-NEXT: call void @llvm.stackrestore(i8* [[TMP48]]) +// CHECK4-NEXT: [[TMP40:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2 +// CHECK4-NEXT: [[CONV:%.*]] = sext i16 [[TMP40]] to i32 +// CHECK4-NEXT: [[TMP41:%.*]] = load i32, i32* [[B]], align 4 +// CHECK4-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[TMP41]] +// CHECK4-NEXT: [[TMP42:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK4-NEXT: call void @llvm.stackrestore(i8* [[TMP42]]) // CHECK4-NEXT: ret i32 [[ADD3]] // // @@ -6417,7 +6293,7 @@ // CHECK4-NEXT: store i8* null, i8** [[TMP26]], align 4 // CHECK4-NEXT: [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK4-NEXT: [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK4-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l224.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) +// CHECK4-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l224.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.14, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.15, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) // CHECK4-NEXT: [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0 // CHECK4-NEXT: br i1 [[TMP30]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK4: omp_offload.failed: @@ -6485,7 +6361,7 @@ // CHECK4-NEXT: store i8* null, i8** [[TMP19]], align 4 // CHECK4-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK4-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK4-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l207.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.15, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) +// CHECK4-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l207.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.17, i32 0, i32 0), 
i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.18, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) // CHECK4-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0 // CHECK4-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK4: omp_offload.failed: @@ -6522,11 +6398,11 @@ // CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[B_ADDR]], align 4 // CHECK4-NEXT: store i32 [[TMP4]], i32* [[B_CASTED]], align 4 // CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[B_CASTED]], align 4 -// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], i16* [[TMP3]]) +// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], i16* [[TMP3]]) // CHECK4-NEXT: ret void // // -// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..9 +// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..10 // CHECK4-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i32 noundef [[B:%.*]], i32 noundef [[VLA:%.*]], i32 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR3]] { // CHECK4-NEXT: entry: // CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -6642,11 +6518,11 @@ // CHECK4-NEXT: [[CONV3:%.*]] = bitcast i32* [[AAA_CASTED]] to i8* // CHECK4-NEXT: store i8 [[TMP5]], i8* [[CONV3]], align 1 // CHECK4-NEXT: [[TMP6:%.*]] = load i32, i32* [[AAA_CASTED]], align 4 -// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, [10 x i32]*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], [10 x i32]* [[TMP0]]) +// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, [10 x i32]*)* @.omp_outlined..13 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], [10 x i32]* [[TMP0]]) // CHECK4-NEXT: ret void // // -// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..13 // CHECK4-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], i32 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] { // CHECK4-NEXT: entry: // CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -6689,11 +6565,11 @@ // CHECK4-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16* // CHECK4-NEXT: store i16 [[TMP3]], i16* [[CONV1]], align 2 // CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4 -// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]]) +// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]]) // CHECK4-NEXT: ret void // // -// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..14 +// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..16 // CHECK4-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] { // CHECK4-NEXT: entry: // CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -9988,7 +9864,6 @@ // CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS20:%.*]] = alloca [10 x i8*], align 8 // CHECK17-NEXT: [[DOTOFFLOAD_PTRS21:%.*]] = alloca [10 x i8*], align 8 // CHECK17-NEXT: [[DOTOFFLOAD_MAPPERS22:%.*]] = alloca [10 x i8*], align 8 -// CHECK17-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [10 x i64], align 8 // CHECK17-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]]) // CHECK17-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 // CHECK17-NEXT: store i32 0, i32* [[A]], align 4 @@ -10158,106 +10033,87 @@ // CHECK17-NEXT: [[TMP91:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0 // CHECK17-NEXT: [[TMP92:%.*]] = bitcast i8** [[TMP91]] to i64* // CHECK17-NEXT: store i64 [[TMP82]], i64* [[TMP92]], align 8 -// CHECK17-NEXT: [[TMP93:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK17-NEXT: store i64 4, i64* [[TMP93]], align 8 -// CHECK17-NEXT: [[TMP94:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 0 -// CHECK17-NEXT: store i8* null, i8** [[TMP94]], align 8 -// CHECK17-NEXT: [[TMP95:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 1 -// CHECK17-NEXT: [[TMP96:%.*]] = bitcast i8** [[TMP95]] to [10 x float]** -// CHECK17-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP96]], align 8 -// CHECK17-NEXT: [[TMP97:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 1 -// CHECK17-NEXT: [[TMP98:%.*]] = bitcast i8** [[TMP97]] to [10 x float]** -// CHECK17-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP98]], align 8 -// CHECK17-NEXT: [[TMP99:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK17-NEXT: store i64 40, i64* [[TMP99]], align 8 -// CHECK17-NEXT: [[TMP100:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 1 -// CHECK17-NEXT: store i8* null, i8** [[TMP100]], align 8 -// CHECK17-NEXT: [[TMP101:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 2 +// CHECK17-NEXT: [[TMP93:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 0 +// CHECK17-NEXT: store i8* null, i8** [[TMP93]], align 8 +// CHECK17-NEXT: [[TMP94:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 1 +// CHECK17-NEXT: [[TMP95:%.*]] = bitcast i8** [[TMP94]] to [10 x float]** +// CHECK17-NEXT: 
store [10 x float]* [[B]], [10 x float]** [[TMP95]], align 8 +// CHECK17-NEXT: [[TMP96:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 1 +// CHECK17-NEXT: [[TMP97:%.*]] = bitcast i8** [[TMP96]] to [10 x float]** +// CHECK17-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP97]], align 8 +// CHECK17-NEXT: [[TMP98:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 1 +// CHECK17-NEXT: store i8* null, i8** [[TMP98]], align 8 +// CHECK17-NEXT: [[TMP99:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 2 +// CHECK17-NEXT: [[TMP100:%.*]] = bitcast i8** [[TMP99]] to i64* +// CHECK17-NEXT: store i64 [[TMP2]], i64* [[TMP100]], align 8 +// CHECK17-NEXT: [[TMP101:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 2 // CHECK17-NEXT: [[TMP102:%.*]] = bitcast i8** [[TMP101]] to i64* // CHECK17-NEXT: store i64 [[TMP2]], i64* [[TMP102]], align 8 -// CHECK17-NEXT: [[TMP103:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 2 -// CHECK17-NEXT: [[TMP104:%.*]] = bitcast i8** [[TMP103]] to i64* -// CHECK17-NEXT: store i64 [[TMP2]], i64* [[TMP104]], align 8 -// CHECK17-NEXT: [[TMP105:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK17-NEXT: store i64 8, i64* [[TMP105]], align 8 -// CHECK17-NEXT: [[TMP106:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 2 -// CHECK17-NEXT: store i8* null, i8** [[TMP106]], align 8 -// CHECK17-NEXT: [[TMP107:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 3 -// CHECK17-NEXT: [[TMP108:%.*]] = bitcast i8** [[TMP107]] to float** -// CHECK17-NEXT: store float* [[VLA]], float** [[TMP108]], align 8 -// CHECK17-NEXT: [[TMP109:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 3 -// CHECK17-NEXT: [[TMP110:%.*]] = bitcast i8** [[TMP109]] to float** -// CHECK17-NEXT: store float* [[VLA]], float** [[TMP110]], align 8 -// CHECK17-NEXT: [[TMP111:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK17-NEXT: store i64 [[TMP86]], i64* [[TMP111]], align 8 -// CHECK17-NEXT: [[TMP112:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 3 -// CHECK17-NEXT: store i8* null, i8** [[TMP112]], align 8 -// CHECK17-NEXT: [[TMP113:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 4 -// CHECK17-NEXT: [[TMP114:%.*]] = bitcast i8** [[TMP113]] to [5 x [10 x double]]** -// CHECK17-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP114]], align 8 -// CHECK17-NEXT: [[TMP115:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 4 -// CHECK17-NEXT: [[TMP116:%.*]] = bitcast i8** [[TMP115]] to [5 x [10 x double]]** -// CHECK17-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP116]], align 8 -// CHECK17-NEXT: [[TMP117:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK17-NEXT: store i64 400, i64* [[TMP117]], align 8 -// CHECK17-NEXT: [[TMP118:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 4 +// CHECK17-NEXT: [[TMP103:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 2 +// CHECK17-NEXT: store i8* null, i8** [[TMP103]], align 
8 +// CHECK17-NEXT: [[TMP104:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 3 +// CHECK17-NEXT: [[TMP105:%.*]] = bitcast i8** [[TMP104]] to float** +// CHECK17-NEXT: store float* [[VLA]], float** [[TMP105]], align 8 +// CHECK17-NEXT: [[TMP106:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 3 +// CHECK17-NEXT: [[TMP107:%.*]] = bitcast i8** [[TMP106]] to float** +// CHECK17-NEXT: store float* [[VLA]], float** [[TMP107]], align 8 +// CHECK17-NEXT: store i64 [[TMP86]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_sizes.8, i32 0, i32 3), align 8 +// CHECK17-NEXT: [[TMP108:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 3 +// CHECK17-NEXT: store i8* null, i8** [[TMP108]], align 8 +// CHECK17-NEXT: [[TMP109:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 4 +// CHECK17-NEXT: [[TMP110:%.*]] = bitcast i8** [[TMP109]] to [5 x [10 x double]]** +// CHECK17-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP110]], align 8 +// CHECK17-NEXT: [[TMP111:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 4 +// CHECK17-NEXT: [[TMP112:%.*]] = bitcast i8** [[TMP111]] to [5 x [10 x double]]** +// CHECK17-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP112]], align 8 +// CHECK17-NEXT: [[TMP113:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 4 +// CHECK17-NEXT: store i8* null, i8** [[TMP113]], align 8 +// CHECK17-NEXT: [[TMP114:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 5 +// CHECK17-NEXT: [[TMP115:%.*]] = bitcast i8** [[TMP114]] to i64* +// CHECK17-NEXT: store i64 5, i64* [[TMP115]], align 8 +// CHECK17-NEXT: [[TMP116:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 5 +// CHECK17-NEXT: [[TMP117:%.*]] = bitcast i8** [[TMP116]] to i64* +// CHECK17-NEXT: store i64 5, i64* [[TMP117]], align 8 +// CHECK17-NEXT: [[TMP118:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 5 // CHECK17-NEXT: store i8* null, i8** [[TMP118]], align 8 -// CHECK17-NEXT: [[TMP119:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 5 +// CHECK17-NEXT: [[TMP119:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 6 // CHECK17-NEXT: [[TMP120:%.*]] = bitcast i8** [[TMP119]] to i64* -// CHECK17-NEXT: store i64 5, i64* [[TMP120]], align 8 -// CHECK17-NEXT: [[TMP121:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 5 +// CHECK17-NEXT: store i64 [[TMP5]], i64* [[TMP120]], align 8 +// CHECK17-NEXT: [[TMP121:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 6 // CHECK17-NEXT: [[TMP122:%.*]] = bitcast i8** [[TMP121]] to i64* -// CHECK17-NEXT: store i64 5, i64* [[TMP122]], align 8 -// CHECK17-NEXT: [[TMP123:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5 -// CHECK17-NEXT: store i64 8, i64* [[TMP123]], align 8 -// CHECK17-NEXT: [[TMP124:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 5 -// CHECK17-NEXT: store i8* null, i8** [[TMP124]], align 8 -// CHECK17-NEXT: [[TMP125:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 6 -// CHECK17-NEXT: 
[[TMP126:%.*]] = bitcast i8** [[TMP125]] to i64* -// CHECK17-NEXT: store i64 [[TMP5]], i64* [[TMP126]], align 8 -// CHECK17-NEXT: [[TMP127:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 6 -// CHECK17-NEXT: [[TMP128:%.*]] = bitcast i8** [[TMP127]] to i64* -// CHECK17-NEXT: store i64 [[TMP5]], i64* [[TMP128]], align 8 -// CHECK17-NEXT: [[TMP129:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6 -// CHECK17-NEXT: store i64 8, i64* [[TMP129]], align 8 -// CHECK17-NEXT: [[TMP130:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 6 -// CHECK17-NEXT: store i8* null, i8** [[TMP130]], align 8 -// CHECK17-NEXT: [[TMP131:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 7 -// CHECK17-NEXT: [[TMP132:%.*]] = bitcast i8** [[TMP131]] to double** -// CHECK17-NEXT: store double* [[VLA1]], double** [[TMP132]], align 8 -// CHECK17-NEXT: [[TMP133:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 7 -// CHECK17-NEXT: [[TMP134:%.*]] = bitcast i8** [[TMP133]] to double** -// CHECK17-NEXT: store double* [[VLA1]], double** [[TMP134]], align 8 -// CHECK17-NEXT: [[TMP135:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7 -// CHECK17-NEXT: store i64 [[TMP88]], i64* [[TMP135]], align 8 -// CHECK17-NEXT: [[TMP136:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 7 -// CHECK17-NEXT: store i8* null, i8** [[TMP136]], align 8 -// CHECK17-NEXT: [[TMP137:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 8 -// CHECK17-NEXT: [[TMP138:%.*]] = bitcast i8** [[TMP137]] to %struct.TT** -// CHECK17-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP138]], align 8 -// CHECK17-NEXT: [[TMP139:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 8 -// CHECK17-NEXT: [[TMP140:%.*]] = bitcast i8** [[TMP139]] to %struct.TT** -// CHECK17-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP140]], align 8 -// CHECK17-NEXT: [[TMP141:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 8 -// CHECK17-NEXT: store i64 16, i64* [[TMP141]], align 8 -// CHECK17-NEXT: [[TMP142:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 8 -// CHECK17-NEXT: store i8* null, i8** [[TMP142]], align 8 -// CHECK17-NEXT: [[TMP143:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 9 -// CHECK17-NEXT: [[TMP144:%.*]] = bitcast i8** [[TMP143]] to i64* -// CHECK17-NEXT: store i64 [[TMP84]], i64* [[TMP144]], align 8 -// CHECK17-NEXT: [[TMP145:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 9 -// CHECK17-NEXT: [[TMP146:%.*]] = bitcast i8** [[TMP145]] to i64* -// CHECK17-NEXT: store i64 [[TMP84]], i64* [[TMP146]], align 8 -// CHECK17-NEXT: [[TMP147:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 9 -// CHECK17-NEXT: store i64 4, i64* [[TMP147]], align 8 -// CHECK17-NEXT: [[TMP148:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 9 -// CHECK17-NEXT: store i8* null, i8** [[TMP148]], align 8 -// CHECK17-NEXT: [[TMP149:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP150:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* 
[[DOTOFFLOAD_PTRS21]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP151:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP152:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l170.region_id, i32 10, i8** [[TMP149]], i8** [[TMP150]], i64* [[TMP151]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_maptypes.8, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) -// CHECK17-NEXT: [[TMP153:%.*]] = icmp ne i32 [[TMP152]], 0 -// CHECK17-NEXT: br i1 [[TMP153]], label [[OMP_OFFLOAD_FAILED23:%.*]], label [[OMP_OFFLOAD_CONT24:%.*]] +// CHECK17-NEXT: store i64 [[TMP5]], i64* [[TMP122]], align 8 +// CHECK17-NEXT: [[TMP123:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 6 +// CHECK17-NEXT: store i8* null, i8** [[TMP123]], align 8 +// CHECK17-NEXT: [[TMP124:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 7 +// CHECK17-NEXT: [[TMP125:%.*]] = bitcast i8** [[TMP124]] to double** +// CHECK17-NEXT: store double* [[VLA1]], double** [[TMP125]], align 8 +// CHECK17-NEXT: [[TMP126:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 7 +// CHECK17-NEXT: [[TMP127:%.*]] = bitcast i8** [[TMP126]] to double** +// CHECK17-NEXT: store double* [[VLA1]], double** [[TMP127]], align 8 +// CHECK17-NEXT: store i64 [[TMP88]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_sizes.8, i32 0, i32 7), align 8 +// CHECK17-NEXT: [[TMP128:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 7 +// CHECK17-NEXT: store i8* null, i8** [[TMP128]], align 8 +// CHECK17-NEXT: [[TMP129:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 8 +// CHECK17-NEXT: [[TMP130:%.*]] = bitcast i8** [[TMP129]] to %struct.TT** +// CHECK17-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP130]], align 8 +// CHECK17-NEXT: [[TMP131:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 8 +// CHECK17-NEXT: [[TMP132:%.*]] = bitcast i8** [[TMP131]] to %struct.TT** +// CHECK17-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP132]], align 8 +// CHECK17-NEXT: [[TMP133:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 8 +// CHECK17-NEXT: store i8* null, i8** [[TMP133]], align 8 +// CHECK17-NEXT: [[TMP134:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 9 +// CHECK17-NEXT: [[TMP135:%.*]] = bitcast i8** [[TMP134]] to i64* +// CHECK17-NEXT: store i64 [[TMP84]], i64* [[TMP135]], align 8 +// CHECK17-NEXT: [[TMP136:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 9 +// CHECK17-NEXT: [[TMP137:%.*]] = bitcast i8** [[TMP136]] to i64* +// CHECK17-NEXT: store i64 [[TMP84]], i64* [[TMP137]], align 8 +// CHECK17-NEXT: [[TMP138:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 9 +// CHECK17-NEXT: store i8* null, i8** [[TMP138]], align 8 +// CHECK17-NEXT: [[TMP139:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP140:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP141:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* 
@.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l170.region_id, i32 10, i8** [[TMP139]], i8** [[TMP140]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) +// CHECK17-NEXT: [[TMP142:%.*]] = icmp ne i32 [[TMP141]], 0 +// CHECK17-NEXT: br i1 [[TMP142]], label [[OMP_OFFLOAD_FAILED23:%.*]], label [[OMP_OFFLOAD_CONT24:%.*]] // CHECK17: omp_offload.failed23: // CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l170(i64 [[TMP82]], [10 x float]* [[B]], i64 [[TMP2]], float* [[VLA]], [5 x [10 x double]]* [[C]], i64 5, i64 [[TMP5]], double* [[VLA1]], %struct.TT* [[D]], i64 [[TMP84]]) #[[ATTR4]] // CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT24]] @@ -10267,10 +10123,10 @@ // CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l170(i64 [[TMP82]], [10 x float]* [[B]], i64 [[TMP2]], float* [[VLA]], [5 x [10 x double]]* [[C]], i64 5, i64 [[TMP5]], double* [[VLA1]], %struct.TT* [[D]], i64 [[TMP84]]) #[[ATTR4]] // CHECK17-NEXT: br label [[OMP_IF_END26]] // CHECK17: omp_if.end26: -// CHECK17-NEXT: [[TMP154:%.*]] = load i32, i32* [[A]], align 4 -// CHECK17-NEXT: [[TMP155:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK17-NEXT: call void @llvm.stackrestore(i8* [[TMP155]]) -// CHECK17-NEXT: ret i32 [[TMP154]] +// CHECK17-NEXT: [[TMP143:%.*]] = load i32, i32* [[A]], align 4 +// CHECK17-NEXT: [[TMP144:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK17-NEXT: call void @llvm.stackrestore(i8* [[TMP144]]) +// CHECK17-NEXT: ret i32 [[TMP143]] // // // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l103 @@ -11043,7 +10899,6 @@ // CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 8 // CHECK17-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 8 // CHECK17-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 8 -// CHECK17-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 8 // CHECK17-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 // CHECK17-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 // CHECK17-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8 @@ -11074,56 +10929,46 @@ // CHECK17-NEXT: [[TMP12:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK17-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to double** // CHECK17-NEXT: store double* [[A]], double** [[TMP13]], align 8 -// CHECK17-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK17-NEXT: store i64 8, i64* [[TMP14]], align 8 -// CHECK17-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK17-NEXT: store i8* null, i8** [[TMP15]], align 8 -// CHECK17-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK17-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i64* -// CHECK17-NEXT: store i64 [[TMP6]], i64* [[TMP17]], align 8 -// CHECK17-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK17-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i64* -// CHECK17-NEXT: store i64 [[TMP6]], i64* [[TMP19]], align 8 -// CHECK17-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK17-NEXT: store i64 4, i64* 
[[TMP20]], align 8 -// CHECK17-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK17-NEXT: store i8* null, i8** [[TMP21]], align 8 -// CHECK17-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK17-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK17-NEXT: store i8* null, i8** [[TMP14]], align 8 +// CHECK17-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK17-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i64* +// CHECK17-NEXT: store i64 [[TMP6]], i64* [[TMP16]], align 8 +// CHECK17-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK17-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i64* +// CHECK17-NEXT: store i64 [[TMP6]], i64* [[TMP18]], align 8 +// CHECK17-NEXT: [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK17-NEXT: store i8* null, i8** [[TMP19]], align 8 +// CHECK17-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK17-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i64* +// CHECK17-NEXT: store i64 2, i64* [[TMP21]], align 8 +// CHECK17-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK17-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i64* // CHECK17-NEXT: store i64 2, i64* [[TMP23]], align 8 -// CHECK17-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK17-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i64* -// CHECK17-NEXT: store i64 2, i64* [[TMP25]], align 8 -// CHECK17-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK17-NEXT: store i64 8, i64* [[TMP26]], align 8 -// CHECK17-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK17-NEXT: store i8* null, i8** [[TMP27]], align 8 -// CHECK17-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK17-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i64* -// CHECK17-NEXT: store i64 [[TMP2]], i64* [[TMP29]], align 8 -// CHECK17-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK17-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i64* -// CHECK17-NEXT: store i64 [[TMP2]], i64* [[TMP31]], align 8 -// CHECK17-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK17-NEXT: store i64 8, i64* [[TMP32]], align 8 -// CHECK17-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 -// CHECK17-NEXT: store i8* null, i8** [[TMP33]], align 8 -// CHECK17-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK17-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i16** -// CHECK17-NEXT: store i16* [[VLA]], i16** [[TMP35]], align 8 -// CHECK17-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK17-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i16** -// CHECK17-NEXT: store i16* [[VLA]], i16** [[TMP37]], align 8 -// CHECK17-NEXT: [[TMP38:%.*]] = getelementptr 
inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK17-NEXT: store i64 [[TMP9]], i64* [[TMP38]], align 8 -// CHECK17-NEXT: [[TMP39:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 -// CHECK17-NEXT: store i8* null, i8** [[TMP39]], align 8 -// CHECK17-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l242.region_id, i32 5, i8** [[TMP40]], i8** [[TMP41]], i64* [[TMP42]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) -// CHECK17-NEXT: [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0 -// CHECK17-NEXT: br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK17-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK17-NEXT: store i8* null, i8** [[TMP24]], align 8 +// CHECK17-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK17-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i64* +// CHECK17-NEXT: store i64 [[TMP2]], i64* [[TMP26]], align 8 +// CHECK17-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK17-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i64* +// CHECK17-NEXT: store i64 [[TMP2]], i64* [[TMP28]], align 8 +// CHECK17-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 +// CHECK17-NEXT: store i8* null, i8** [[TMP29]], align 8 +// CHECK17-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK17-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i16** +// CHECK17-NEXT: store i16* [[VLA]], i16** [[TMP31]], align 8 +// CHECK17-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK17-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i16** +// CHECK17-NEXT: store i16* [[VLA]], i16** [[TMP33]], align 8 +// CHECK17-NEXT: store i64 [[TMP9]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.11, i32 0, i32 4), align 8 +// CHECK17-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 +// CHECK17-NEXT: store i8* null, i8** [[TMP34]], align 8 +// CHECK17-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP37:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l242.region_id, i32 5, i8** [[TMP35]], i8** [[TMP36]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.11, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) +// CHECK17-NEXT: [[TMP38:%.*]] = icmp ne i32 [[TMP37]], 0 +// CHECK17-NEXT: br i1 [[TMP38]], label 
[[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK17: omp_offload.failed: // CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l242(%struct.S1* [[THIS1]], i64 [[TMP6]], i64 2, i64 [[TMP2]], i16* [[VLA]]) #[[ATTR4]] // CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -11133,15 +10978,15 @@ // CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l242(%struct.S1* [[THIS1]], i64 [[TMP6]], i64 2, i64 [[TMP2]], i16* [[VLA]]) #[[ATTR4]] // CHECK17-NEXT: br label [[OMP_IF_END]] // CHECK17: omp_if.end: -// CHECK17-NEXT: [[TMP45:%.*]] = mul nsw i64 1, [[TMP2]] -// CHECK17-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP45]] +// CHECK17-NEXT: [[TMP39:%.*]] = mul nsw i64 1, [[TMP2]] +// CHECK17-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP39]] // CHECK17-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1 -// CHECK17-NEXT: [[TMP46:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2 -// CHECK17-NEXT: [[CONV3:%.*]] = sext i16 [[TMP46]] to i32 -// CHECK17-NEXT: [[TMP47:%.*]] = load i32, i32* [[B]], align 4 -// CHECK17-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], [[TMP47]] -// CHECK17-NEXT: [[TMP48:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK17-NEXT: call void @llvm.stackrestore(i8* [[TMP48]]) +// CHECK17-NEXT: [[TMP40:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2 +// CHECK17-NEXT: [[CONV3:%.*]] = sext i16 [[TMP40]] to i32 +// CHECK17-NEXT: [[TMP41:%.*]] = load i32, i32* [[B]], align 4 +// CHECK17-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], [[TMP41]] +// CHECK17-NEXT: [[TMP42:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK17-NEXT: call void @llvm.stackrestore(i8* [[TMP42]]) // CHECK17-NEXT: ret i32 [[ADD4]] // // @@ -11213,7 +11058,7 @@ // CHECK17-NEXT: store i8* null, i8** [[TMP26]], align 8 // CHECK17-NEXT: [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK17-NEXT: [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l224.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) +// CHECK17-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l224.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.14, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.15, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) // CHECK17-NEXT: [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0 // CHECK17-NEXT: br i1 [[TMP30]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK17: omp_offload.failed: @@ -11282,7 +11127,7 @@ // CHECK17-NEXT: store i8* null, i8** [[TMP19]], align 8 // CHECK17-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK17-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* 
@[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l207.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.15, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) +// CHECK17-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l207.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.17, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.18, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) // CHECK17-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0 // CHECK17-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK17: omp_offload.failed: @@ -11321,11 +11166,11 @@ // CHECK17-NEXT: [[CONV3:%.*]] = bitcast i64* [[B_CASTED]] to i32* // CHECK17-NEXT: store i32 [[TMP4]], i32* [[CONV3]], align 4 // CHECK17-NEXT: [[TMP5:%.*]] = load i64, i64* [[B_CASTED]], align 8 -// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], i16* [[TMP3]]) +// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], i16* [[TMP3]]) // CHECK17-NEXT: ret void // // -// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..9 +// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..10 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i64 noundef [[B:%.*]], i64 noundef [[VLA:%.*]], i64 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR3]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -11444,11 +11289,11 @@ // CHECK17-NEXT: [[CONV5:%.*]] = bitcast i64* [[AAA_CASTED]] to i8* // CHECK17-NEXT: store i8 [[TMP5]], i8* [[CONV5]], align 1 // CHECK17-NEXT: [[TMP6:%.*]] = load i64, i64* [[AAA_CASTED]], align 8 -// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, [10 x i32]*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], [10 x i32]* [[TMP0]]) +// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, [10 x i32]*)* @.omp_outlined..13 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], [10 x i32]* [[TMP0]]) // CHECK17-NEXT: ret void // // -// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..13 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], i64 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -11494,11 +11339,11 @@ // CHECK17-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16* // CHECK17-NEXT: store i16 [[TMP3]], i16* [[CONV3]], align 2 // CHECK17-NEXT: [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8 -// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]]) +// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]]) // CHECK17-NEXT: ret void // // -// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..14 +// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..16 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -11626,7 +11471,6 @@ // CHECK18-NEXT: [[DOTOFFLOAD_BASEPTRS20:%.*]] = alloca [10 x i8*], align 8 // CHECK18-NEXT: [[DOTOFFLOAD_PTRS21:%.*]] = alloca [10 x i8*], align 8 // CHECK18-NEXT: [[DOTOFFLOAD_MAPPERS22:%.*]] = alloca [10 x i8*], align 8 -// CHECK18-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [10 x i64], align 8 // CHECK18-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]]) // CHECK18-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 // CHECK18-NEXT: store i32 0, i32* [[A]], align 4 @@ -11796,106 +11640,87 @@ // CHECK18-NEXT: [[TMP91:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0 // CHECK18-NEXT: [[TMP92:%.*]] = bitcast i8** [[TMP91]] to i64* // CHECK18-NEXT: store i64 [[TMP82]], i64* [[TMP92]], align 8 -// CHECK18-NEXT: [[TMP93:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK18-NEXT: store i64 4, i64* [[TMP93]], align 8 -// CHECK18-NEXT: [[TMP94:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 0 -// CHECK18-NEXT: store i8* null, i8** [[TMP94]], align 8 -// CHECK18-NEXT: [[TMP95:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 1 -// CHECK18-NEXT: [[TMP96:%.*]] = bitcast i8** [[TMP95]] to [10 x float]** -// CHECK18-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP96]], align 8 -// CHECK18-NEXT: [[TMP97:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* 
[[DOTOFFLOAD_PTRS21]], i32 0, i32 1 -// CHECK18-NEXT: [[TMP98:%.*]] = bitcast i8** [[TMP97]] to [10 x float]** -// CHECK18-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP98]], align 8 -// CHECK18-NEXT: [[TMP99:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK18-NEXT: store i64 40, i64* [[TMP99]], align 8 -// CHECK18-NEXT: [[TMP100:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 1 -// CHECK18-NEXT: store i8* null, i8** [[TMP100]], align 8 -// CHECK18-NEXT: [[TMP101:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 2 +// CHECK18-NEXT: [[TMP93:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 0 +// CHECK18-NEXT: store i8* null, i8** [[TMP93]], align 8 +// CHECK18-NEXT: [[TMP94:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 1 +// CHECK18-NEXT: [[TMP95:%.*]] = bitcast i8** [[TMP94]] to [10 x float]** +// CHECK18-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP95]], align 8 +// CHECK18-NEXT: [[TMP96:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 1 +// CHECK18-NEXT: [[TMP97:%.*]] = bitcast i8** [[TMP96]] to [10 x float]** +// CHECK18-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP97]], align 8 +// CHECK18-NEXT: [[TMP98:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 1 +// CHECK18-NEXT: store i8* null, i8** [[TMP98]], align 8 +// CHECK18-NEXT: [[TMP99:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 2 +// CHECK18-NEXT: [[TMP100:%.*]] = bitcast i8** [[TMP99]] to i64* +// CHECK18-NEXT: store i64 [[TMP2]], i64* [[TMP100]], align 8 +// CHECK18-NEXT: [[TMP101:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 2 // CHECK18-NEXT: [[TMP102:%.*]] = bitcast i8** [[TMP101]] to i64* // CHECK18-NEXT: store i64 [[TMP2]], i64* [[TMP102]], align 8 -// CHECK18-NEXT: [[TMP103:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 2 -// CHECK18-NEXT: [[TMP104:%.*]] = bitcast i8** [[TMP103]] to i64* -// CHECK18-NEXT: store i64 [[TMP2]], i64* [[TMP104]], align 8 -// CHECK18-NEXT: [[TMP105:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK18-NEXT: store i64 8, i64* [[TMP105]], align 8 -// CHECK18-NEXT: [[TMP106:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 2 -// CHECK18-NEXT: store i8* null, i8** [[TMP106]], align 8 -// CHECK18-NEXT: [[TMP107:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 3 -// CHECK18-NEXT: [[TMP108:%.*]] = bitcast i8** [[TMP107]] to float** -// CHECK18-NEXT: store float* [[VLA]], float** [[TMP108]], align 8 -// CHECK18-NEXT: [[TMP109:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 3 -// CHECK18-NEXT: [[TMP110:%.*]] = bitcast i8** [[TMP109]] to float** -// CHECK18-NEXT: store float* [[VLA]], float** [[TMP110]], align 8 -// CHECK18-NEXT: [[TMP111:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK18-NEXT: store i64 [[TMP86]], i64* [[TMP111]], align 8 -// CHECK18-NEXT: [[TMP112:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 3 -// CHECK18-NEXT: store i8* null, i8** [[TMP112]], align 8 -// 
CHECK18-NEXT: [[TMP113:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 4 -// CHECK18-NEXT: [[TMP114:%.*]] = bitcast i8** [[TMP113]] to [5 x [10 x double]]** -// CHECK18-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP114]], align 8 -// CHECK18-NEXT: [[TMP115:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 4 -// CHECK18-NEXT: [[TMP116:%.*]] = bitcast i8** [[TMP115]] to [5 x [10 x double]]** -// CHECK18-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP116]], align 8 -// CHECK18-NEXT: [[TMP117:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK18-NEXT: store i64 400, i64* [[TMP117]], align 8 -// CHECK18-NEXT: [[TMP118:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 4 +// CHECK18-NEXT: [[TMP103:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 2 +// CHECK18-NEXT: store i8* null, i8** [[TMP103]], align 8 +// CHECK18-NEXT: [[TMP104:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 3 +// CHECK18-NEXT: [[TMP105:%.*]] = bitcast i8** [[TMP104]] to float** +// CHECK18-NEXT: store float* [[VLA]], float** [[TMP105]], align 8 +// CHECK18-NEXT: [[TMP106:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 3 +// CHECK18-NEXT: [[TMP107:%.*]] = bitcast i8** [[TMP106]] to float** +// CHECK18-NEXT: store float* [[VLA]], float** [[TMP107]], align 8 +// CHECK18-NEXT: store i64 [[TMP86]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_sizes.8, i32 0, i32 3), align 8 +// CHECK18-NEXT: [[TMP108:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 3 +// CHECK18-NEXT: store i8* null, i8** [[TMP108]], align 8 +// CHECK18-NEXT: [[TMP109:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 4 +// CHECK18-NEXT: [[TMP110:%.*]] = bitcast i8** [[TMP109]] to [5 x [10 x double]]** +// CHECK18-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP110]], align 8 +// CHECK18-NEXT: [[TMP111:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 4 +// CHECK18-NEXT: [[TMP112:%.*]] = bitcast i8** [[TMP111]] to [5 x [10 x double]]** +// CHECK18-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP112]], align 8 +// CHECK18-NEXT: [[TMP113:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 4 +// CHECK18-NEXT: store i8* null, i8** [[TMP113]], align 8 +// CHECK18-NEXT: [[TMP114:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 5 +// CHECK18-NEXT: [[TMP115:%.*]] = bitcast i8** [[TMP114]] to i64* +// CHECK18-NEXT: store i64 5, i64* [[TMP115]], align 8 +// CHECK18-NEXT: [[TMP116:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 5 +// CHECK18-NEXT: [[TMP117:%.*]] = bitcast i8** [[TMP116]] to i64* +// CHECK18-NEXT: store i64 5, i64* [[TMP117]], align 8 +// CHECK18-NEXT: [[TMP118:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 5 // CHECK18-NEXT: store i8* null, i8** [[TMP118]], align 8 -// CHECK18-NEXT: [[TMP119:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 5 +// CHECK18-NEXT: [[TMP119:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* 
[[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 6 // CHECK18-NEXT: [[TMP120:%.*]] = bitcast i8** [[TMP119]] to i64* -// CHECK18-NEXT: store i64 5, i64* [[TMP120]], align 8 -// CHECK18-NEXT: [[TMP121:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 5 +// CHECK18-NEXT: store i64 [[TMP5]], i64* [[TMP120]], align 8 +// CHECK18-NEXT: [[TMP121:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 6 // CHECK18-NEXT: [[TMP122:%.*]] = bitcast i8** [[TMP121]] to i64* -// CHECK18-NEXT: store i64 5, i64* [[TMP122]], align 8 -// CHECK18-NEXT: [[TMP123:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5 -// CHECK18-NEXT: store i64 8, i64* [[TMP123]], align 8 -// CHECK18-NEXT: [[TMP124:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 5 -// CHECK18-NEXT: store i8* null, i8** [[TMP124]], align 8 -// CHECK18-NEXT: [[TMP125:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 6 -// CHECK18-NEXT: [[TMP126:%.*]] = bitcast i8** [[TMP125]] to i64* -// CHECK18-NEXT: store i64 [[TMP5]], i64* [[TMP126]], align 8 -// CHECK18-NEXT: [[TMP127:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 6 -// CHECK18-NEXT: [[TMP128:%.*]] = bitcast i8** [[TMP127]] to i64* -// CHECK18-NEXT: store i64 [[TMP5]], i64* [[TMP128]], align 8 -// CHECK18-NEXT: [[TMP129:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6 -// CHECK18-NEXT: store i64 8, i64* [[TMP129]], align 8 -// CHECK18-NEXT: [[TMP130:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 6 -// CHECK18-NEXT: store i8* null, i8** [[TMP130]], align 8 -// CHECK18-NEXT: [[TMP131:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 7 -// CHECK18-NEXT: [[TMP132:%.*]] = bitcast i8** [[TMP131]] to double** -// CHECK18-NEXT: store double* [[VLA1]], double** [[TMP132]], align 8 -// CHECK18-NEXT: [[TMP133:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 7 -// CHECK18-NEXT: [[TMP134:%.*]] = bitcast i8** [[TMP133]] to double** -// CHECK18-NEXT: store double* [[VLA1]], double** [[TMP134]], align 8 -// CHECK18-NEXT: [[TMP135:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7 -// CHECK18-NEXT: store i64 [[TMP88]], i64* [[TMP135]], align 8 -// CHECK18-NEXT: [[TMP136:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 7 -// CHECK18-NEXT: store i8* null, i8** [[TMP136]], align 8 -// CHECK18-NEXT: [[TMP137:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 8 -// CHECK18-NEXT: [[TMP138:%.*]] = bitcast i8** [[TMP137]] to %struct.TT** -// CHECK18-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP138]], align 8 -// CHECK18-NEXT: [[TMP139:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 8 -// CHECK18-NEXT: [[TMP140:%.*]] = bitcast i8** [[TMP139]] to %struct.TT** -// CHECK18-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP140]], align 8 -// CHECK18-NEXT: [[TMP141:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 8 -// CHECK18-NEXT: store i64 16, i64* [[TMP141]], align 8 -// CHECK18-NEXT: [[TMP142:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 8 -// CHECK18-NEXT: store i8* null, i8** 
[[TMP142]], align 8 -// CHECK18-NEXT: [[TMP143:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 9 -// CHECK18-NEXT: [[TMP144:%.*]] = bitcast i8** [[TMP143]] to i64* -// CHECK18-NEXT: store i64 [[TMP84]], i64* [[TMP144]], align 8 -// CHECK18-NEXT: [[TMP145:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 9 -// CHECK18-NEXT: [[TMP146:%.*]] = bitcast i8** [[TMP145]] to i64* -// CHECK18-NEXT: store i64 [[TMP84]], i64* [[TMP146]], align 8 -// CHECK18-NEXT: [[TMP147:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 9 -// CHECK18-NEXT: store i64 4, i64* [[TMP147]], align 8 -// CHECK18-NEXT: [[TMP148:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 9 -// CHECK18-NEXT: store i8* null, i8** [[TMP148]], align 8 -// CHECK18-NEXT: [[TMP149:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP150:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP151:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP152:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l170.region_id, i32 10, i8** [[TMP149]], i8** [[TMP150]], i64* [[TMP151]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_maptypes.8, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) -// CHECK18-NEXT: [[TMP153:%.*]] = icmp ne i32 [[TMP152]], 0 -// CHECK18-NEXT: br i1 [[TMP153]], label [[OMP_OFFLOAD_FAILED23:%.*]], label [[OMP_OFFLOAD_CONT24:%.*]] +// CHECK18-NEXT: store i64 [[TMP5]], i64* [[TMP122]], align 8 +// CHECK18-NEXT: [[TMP123:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 6 +// CHECK18-NEXT: store i8* null, i8** [[TMP123]], align 8 +// CHECK18-NEXT: [[TMP124:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 7 +// CHECK18-NEXT: [[TMP125:%.*]] = bitcast i8** [[TMP124]] to double** +// CHECK18-NEXT: store double* [[VLA1]], double** [[TMP125]], align 8 +// CHECK18-NEXT: [[TMP126:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 7 +// CHECK18-NEXT: [[TMP127:%.*]] = bitcast i8** [[TMP126]] to double** +// CHECK18-NEXT: store double* [[VLA1]], double** [[TMP127]], align 8 +// CHECK18-NEXT: store i64 [[TMP88]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_sizes.8, i32 0, i32 7), align 8 +// CHECK18-NEXT: [[TMP128:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 7 +// CHECK18-NEXT: store i8* null, i8** [[TMP128]], align 8 +// CHECK18-NEXT: [[TMP129:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 8 +// CHECK18-NEXT: [[TMP130:%.*]] = bitcast i8** [[TMP129]] to %struct.TT** +// CHECK18-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP130]], align 8 +// CHECK18-NEXT: [[TMP131:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 8 +// CHECK18-NEXT: [[TMP132:%.*]] = bitcast i8** [[TMP131]] to %struct.TT** +// CHECK18-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP132]], align 8 +// CHECK18-NEXT: [[TMP133:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 8 +// CHECK18-NEXT: store i8* null, i8** 
[[TMP133]], align 8 +// CHECK18-NEXT: [[TMP134:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 9 +// CHECK18-NEXT: [[TMP135:%.*]] = bitcast i8** [[TMP134]] to i64* +// CHECK18-NEXT: store i64 [[TMP84]], i64* [[TMP135]], align 8 +// CHECK18-NEXT: [[TMP136:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 9 +// CHECK18-NEXT: [[TMP137:%.*]] = bitcast i8** [[TMP136]] to i64* +// CHECK18-NEXT: store i64 [[TMP84]], i64* [[TMP137]], align 8 +// CHECK18-NEXT: [[TMP138:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 9 +// CHECK18-NEXT: store i8* null, i8** [[TMP138]], align 8 +// CHECK18-NEXT: [[TMP139:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP140:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP141:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l170.region_id, i32 10, i8** [[TMP139]], i8** [[TMP140]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) +// CHECK18-NEXT: [[TMP142:%.*]] = icmp ne i32 [[TMP141]], 0 +// CHECK18-NEXT: br i1 [[TMP142]], label [[OMP_OFFLOAD_FAILED23:%.*]], label [[OMP_OFFLOAD_CONT24:%.*]] // CHECK18: omp_offload.failed23: // CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l170(i64 [[TMP82]], [10 x float]* [[B]], i64 [[TMP2]], float* [[VLA]], [5 x [10 x double]]* [[C]], i64 5, i64 [[TMP5]], double* [[VLA1]], %struct.TT* [[D]], i64 [[TMP84]]) #[[ATTR4]] // CHECK18-NEXT: br label [[OMP_OFFLOAD_CONT24]] @@ -11905,10 +11730,10 @@ // CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l170(i64 [[TMP82]], [10 x float]* [[B]], i64 [[TMP2]], float* [[VLA]], [5 x [10 x double]]* [[C]], i64 5, i64 [[TMP5]], double* [[VLA1]], %struct.TT* [[D]], i64 [[TMP84]]) #[[ATTR4]] // CHECK18-NEXT: br label [[OMP_IF_END26]] // CHECK18: omp_if.end26: -// CHECK18-NEXT: [[TMP154:%.*]] = load i32, i32* [[A]], align 4 -// CHECK18-NEXT: [[TMP155:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK18-NEXT: call void @llvm.stackrestore(i8* [[TMP155]]) -// CHECK18-NEXT: ret i32 [[TMP154]] +// CHECK18-NEXT: [[TMP143:%.*]] = load i32, i32* [[A]], align 4 +// CHECK18-NEXT: [[TMP144:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK18-NEXT: call void @llvm.stackrestore(i8* [[TMP144]]) +// CHECK18-NEXT: ret i32 [[TMP143]] // // // CHECK18-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l103 @@ -12681,7 +12506,6 @@ // CHECK18-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 8 // CHECK18-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 8 // CHECK18-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 8 -// CHECK18-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 8 // CHECK18-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 // CHECK18-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 // CHECK18-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8 @@ -12712,56 +12536,46 @@ // CHECK18-NEXT: [[TMP12:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK18-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to 
double** // CHECK18-NEXT: store double* [[A]], double** [[TMP13]], align 8 -// CHECK18-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK18-NEXT: store i64 8, i64* [[TMP14]], align 8 -// CHECK18-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK18-NEXT: store i8* null, i8** [[TMP15]], align 8 -// CHECK18-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK18-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i64* -// CHECK18-NEXT: store i64 [[TMP6]], i64* [[TMP17]], align 8 -// CHECK18-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK18-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i64* -// CHECK18-NEXT: store i64 [[TMP6]], i64* [[TMP19]], align 8 -// CHECK18-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK18-NEXT: store i64 4, i64* [[TMP20]], align 8 -// CHECK18-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK18-NEXT: store i8* null, i8** [[TMP21]], align 8 -// CHECK18-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK18-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK18-NEXT: store i8* null, i8** [[TMP14]], align 8 +// CHECK18-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK18-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i64* +// CHECK18-NEXT: store i64 [[TMP6]], i64* [[TMP16]], align 8 +// CHECK18-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK18-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i64* +// CHECK18-NEXT: store i64 [[TMP6]], i64* [[TMP18]], align 8 +// CHECK18-NEXT: [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK18-NEXT: store i8* null, i8** [[TMP19]], align 8 +// CHECK18-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK18-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i64* +// CHECK18-NEXT: store i64 2, i64* [[TMP21]], align 8 +// CHECK18-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK18-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i64* // CHECK18-NEXT: store i64 2, i64* [[TMP23]], align 8 -// CHECK18-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK18-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i64* -// CHECK18-NEXT: store i64 2, i64* [[TMP25]], align 8 -// CHECK18-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK18-NEXT: store i64 8, i64* [[TMP26]], align 8 -// CHECK18-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK18-NEXT: store i8* null, i8** [[TMP27]], align 8 -// CHECK18-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK18-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i64* -// CHECK18-NEXT: store i64 [[TMP2]], i64* [[TMP29]], align 8 -// CHECK18-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x 
i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK18-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i64* -// CHECK18-NEXT: store i64 [[TMP2]], i64* [[TMP31]], align 8 -// CHECK18-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK18-NEXT: store i64 8, i64* [[TMP32]], align 8 -// CHECK18-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 -// CHECK18-NEXT: store i8* null, i8** [[TMP33]], align 8 -// CHECK18-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK18-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i16** -// CHECK18-NEXT: store i16* [[VLA]], i16** [[TMP35]], align 8 -// CHECK18-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK18-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i16** -// CHECK18-NEXT: store i16* [[VLA]], i16** [[TMP37]], align 8 -// CHECK18-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK18-NEXT: store i64 [[TMP9]], i64* [[TMP38]], align 8 -// CHECK18-NEXT: [[TMP39:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 -// CHECK18-NEXT: store i8* null, i8** [[TMP39]], align 8 -// CHECK18-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l242.region_id, i32 5, i8** [[TMP40]], i8** [[TMP41]], i64* [[TMP42]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) -// CHECK18-NEXT: [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0 -// CHECK18-NEXT: br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK18-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK18-NEXT: store i8* null, i8** [[TMP24]], align 8 +// CHECK18-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK18-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i64* +// CHECK18-NEXT: store i64 [[TMP2]], i64* [[TMP26]], align 8 +// CHECK18-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK18-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i64* +// CHECK18-NEXT: store i64 [[TMP2]], i64* [[TMP28]], align 8 +// CHECK18-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 +// CHECK18-NEXT: store i8* null, i8** [[TMP29]], align 8 +// CHECK18-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK18-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i16** +// CHECK18-NEXT: store i16* [[VLA]], i16** [[TMP31]], align 8 +// CHECK18-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK18-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i16** +// CHECK18-NEXT: store i16* [[VLA]], i16** [[TMP33]], align 8 +// CHECK18-NEXT: 
store i64 [[TMP9]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.11, i32 0, i32 4), align 8 +// CHECK18-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 +// CHECK18-NEXT: store i8* null, i8** [[TMP34]], align 8 +// CHECK18-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP37:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l242.region_id, i32 5, i8** [[TMP35]], i8** [[TMP36]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.11, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) +// CHECK18-NEXT: [[TMP38:%.*]] = icmp ne i32 [[TMP37]], 0 +// CHECK18-NEXT: br i1 [[TMP38]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK18: omp_offload.failed: // CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l242(%struct.S1* [[THIS1]], i64 [[TMP6]], i64 2, i64 [[TMP2]], i16* [[VLA]]) #[[ATTR4]] // CHECK18-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -12771,15 +12585,15 @@ // CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l242(%struct.S1* [[THIS1]], i64 [[TMP6]], i64 2, i64 [[TMP2]], i16* [[VLA]]) #[[ATTR4]] // CHECK18-NEXT: br label [[OMP_IF_END]] // CHECK18: omp_if.end: -// CHECK18-NEXT: [[TMP45:%.*]] = mul nsw i64 1, [[TMP2]] -// CHECK18-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP45]] +// CHECK18-NEXT: [[TMP39:%.*]] = mul nsw i64 1, [[TMP2]] +// CHECK18-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP39]] // CHECK18-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1 -// CHECK18-NEXT: [[TMP46:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2 -// CHECK18-NEXT: [[CONV3:%.*]] = sext i16 [[TMP46]] to i32 -// CHECK18-NEXT: [[TMP47:%.*]] = load i32, i32* [[B]], align 4 -// CHECK18-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], [[TMP47]] -// CHECK18-NEXT: [[TMP48:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK18-NEXT: call void @llvm.stackrestore(i8* [[TMP48]]) +// CHECK18-NEXT: [[TMP40:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2 +// CHECK18-NEXT: [[CONV3:%.*]] = sext i16 [[TMP40]] to i32 +// CHECK18-NEXT: [[TMP41:%.*]] = load i32, i32* [[B]], align 4 +// CHECK18-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], [[TMP41]] +// CHECK18-NEXT: [[TMP42:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK18-NEXT: call void @llvm.stackrestore(i8* [[TMP42]]) // CHECK18-NEXT: ret i32 [[ADD4]] // // @@ -12851,7 +12665,7 @@ // CHECK18-NEXT: store i8* null, i8** [[TMP26]], align 8 // CHECK18-NEXT: [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK18-NEXT: [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l224.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), 
i8** null, i8** null, i32 1, i32 0) +// CHECK18-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l224.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.14, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.15, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) // CHECK18-NEXT: [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0 // CHECK18-NEXT: br i1 [[TMP30]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK18: omp_offload.failed: @@ -12920,7 +12734,7 @@ // CHECK18-NEXT: store i8* null, i8** [[TMP19]], align 8 // CHECK18-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK18-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l207.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.15, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) +// CHECK18-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l207.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.17, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.18, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) // CHECK18-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0 // CHECK18-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK18: omp_offload.failed: @@ -12959,11 +12773,11 @@ // CHECK18-NEXT: [[CONV3:%.*]] = bitcast i64* [[B_CASTED]] to i32* // CHECK18-NEXT: store i32 [[TMP4]], i32* [[CONV3]], align 4 // CHECK18-NEXT: [[TMP5:%.*]] = load i64, i64* [[B_CASTED]], align 8 -// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], i16* [[TMP3]]) +// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], i16* [[TMP3]]) // CHECK18-NEXT: ret void // // -// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..9 +// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..10 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i64 noundef [[B:%.*]], i64 noundef [[VLA:%.*]], i64 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR3]] { // CHECK18-NEXT: entry: // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -13082,11 +12896,11 @@ // CHECK18-NEXT: [[CONV5:%.*]] = bitcast i64* [[AAA_CASTED]] to i8* // CHECK18-NEXT: store i8 [[TMP5]], i8* [[CONV5]], align 1 // CHECK18-NEXT: [[TMP6:%.*]] = load i64, i64* [[AAA_CASTED]], align 8 -// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, [10 x i32]*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], [10 x i32]* [[TMP0]]) +// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, [10 x i32]*)* @.omp_outlined..13 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], [10 x i32]* [[TMP0]]) // CHECK18-NEXT: ret void // // -// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..13 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], i64 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] { // CHECK18-NEXT: entry: // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -13132,11 +12946,11 @@ // CHECK18-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16* // CHECK18-NEXT: store i16 [[TMP3]], i16* [[CONV3]], align 2 // CHECK18-NEXT: [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8 -// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]]) +// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]]) // CHECK18-NEXT: ret void // // -// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..14 +// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..16 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] { // CHECK18-NEXT: entry: // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -13263,7 +13077,6 @@ // CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS14:%.*]] = alloca [10 x i8*], align 4 // CHECK19-NEXT: [[DOTOFFLOAD_PTRS15:%.*]] = alloca [10 x i8*], align 4 // CHECK19-NEXT: [[DOTOFFLOAD_MAPPERS16:%.*]] = alloca [10 x i8*], align 4 -// CHECK19-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [10 x i64], align 4 // CHECK19-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]]) // CHECK19-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 // CHECK19-NEXT: store i32 0, i32* [[A]], align 4 @@ -13424,106 +13237,87 @@ // CHECK19-NEXT: [[TMP89:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 0 // CHECK19-NEXT: [[TMP90:%.*]] = bitcast i8** [[TMP89]] to i32* // CHECK19-NEXT: store i32 [[TMP78]], i32* [[TMP90]], align 4 -// CHECK19-NEXT: [[TMP91:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK19-NEXT: store i64 4, i64* [[TMP91]], align 4 -// CHECK19-NEXT: [[TMP92:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 0 -// CHECK19-NEXT: store i8* null, i8** [[TMP92]], align 4 -// CHECK19-NEXT: [[TMP93:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 1 -// CHECK19-NEXT: [[TMP94:%.*]] = bitcast i8** [[TMP93]] to [10 x float]** -// CHECK19-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP94]], align 4 -// CHECK19-NEXT: [[TMP95:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 1 -// CHECK19-NEXT: [[TMP96:%.*]] = bitcast i8** [[TMP95]] to [10 x float]** -// CHECK19-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP96]], align 4 -// CHECK19-NEXT: [[TMP97:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK19-NEXT: store i64 40, i64* [[TMP97]], align 4 -// CHECK19-NEXT: [[TMP98:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 1 -// CHECK19-NEXT: store i8* null, i8** [[TMP98]], align 4 -// CHECK19-NEXT: [[TMP99:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 2 +// CHECK19-NEXT: [[TMP91:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 0 +// CHECK19-NEXT: store i8* null, i8** [[TMP91]], align 4 +// CHECK19-NEXT: [[TMP92:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 1 +// CHECK19-NEXT: [[TMP93:%.*]] = bitcast i8** [[TMP92]] to [10 x float]** +// CHECK19-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP93]], align 4 +// CHECK19-NEXT: [[TMP94:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 1 +// CHECK19-NEXT: [[TMP95:%.*]] = bitcast i8** [[TMP94]] to [10 x float]** +// CHECK19-NEXT: store [10 x float]* [[B]], [10 x 
float]** [[TMP95]], align 4 +// CHECK19-NEXT: [[TMP96:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 1 +// CHECK19-NEXT: store i8* null, i8** [[TMP96]], align 4 +// CHECK19-NEXT: [[TMP97:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 2 +// CHECK19-NEXT: [[TMP98:%.*]] = bitcast i8** [[TMP97]] to i32* +// CHECK19-NEXT: store i32 [[TMP1]], i32* [[TMP98]], align 4 +// CHECK19-NEXT: [[TMP99:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 2 // CHECK19-NEXT: [[TMP100:%.*]] = bitcast i8** [[TMP99]] to i32* // CHECK19-NEXT: store i32 [[TMP1]], i32* [[TMP100]], align 4 -// CHECK19-NEXT: [[TMP101:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 2 -// CHECK19-NEXT: [[TMP102:%.*]] = bitcast i8** [[TMP101]] to i32* -// CHECK19-NEXT: store i32 [[TMP1]], i32* [[TMP102]], align 4 -// CHECK19-NEXT: [[TMP103:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK19-NEXT: store i64 4, i64* [[TMP103]], align 4 -// CHECK19-NEXT: [[TMP104:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 2 -// CHECK19-NEXT: store i8* null, i8** [[TMP104]], align 4 -// CHECK19-NEXT: [[TMP105:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 3 -// CHECK19-NEXT: [[TMP106:%.*]] = bitcast i8** [[TMP105]] to float** -// CHECK19-NEXT: store float* [[VLA]], float** [[TMP106]], align 4 -// CHECK19-NEXT: [[TMP107:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 3 -// CHECK19-NEXT: [[TMP108:%.*]] = bitcast i8** [[TMP107]] to float** -// CHECK19-NEXT: store float* [[VLA]], float** [[TMP108]], align 4 -// CHECK19-NEXT: [[TMP109:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK19-NEXT: store i64 [[TMP83]], i64* [[TMP109]], align 4 -// CHECK19-NEXT: [[TMP110:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 3 -// CHECK19-NEXT: store i8* null, i8** [[TMP110]], align 4 -// CHECK19-NEXT: [[TMP111:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 4 -// CHECK19-NEXT: [[TMP112:%.*]] = bitcast i8** [[TMP111]] to [5 x [10 x double]]** -// CHECK19-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP112]], align 4 -// CHECK19-NEXT: [[TMP113:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 4 -// CHECK19-NEXT: [[TMP114:%.*]] = bitcast i8** [[TMP113]] to [5 x [10 x double]]** -// CHECK19-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP114]], align 4 -// CHECK19-NEXT: [[TMP115:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK19-NEXT: store i64 400, i64* [[TMP115]], align 4 -// CHECK19-NEXT: [[TMP116:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 4 +// CHECK19-NEXT: [[TMP101:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 2 +// CHECK19-NEXT: store i8* null, i8** [[TMP101]], align 4 +// CHECK19-NEXT: [[TMP102:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 3 +// CHECK19-NEXT: [[TMP103:%.*]] = bitcast i8** [[TMP102]] to float** +// CHECK19-NEXT: store float* [[VLA]], float** [[TMP103]], align 4 +// CHECK19-NEXT: [[TMP104:%.*]] = 
getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 3 +// CHECK19-NEXT: [[TMP105:%.*]] = bitcast i8** [[TMP104]] to float** +// CHECK19-NEXT: store float* [[VLA]], float** [[TMP105]], align 4 +// CHECK19-NEXT: store i64 [[TMP83]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_sizes.8, i32 0, i32 3), align 4 +// CHECK19-NEXT: [[TMP106:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 3 +// CHECK19-NEXT: store i8* null, i8** [[TMP106]], align 4 +// CHECK19-NEXT: [[TMP107:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 4 +// CHECK19-NEXT: [[TMP108:%.*]] = bitcast i8** [[TMP107]] to [5 x [10 x double]]** +// CHECK19-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP108]], align 4 +// CHECK19-NEXT: [[TMP109:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 4 +// CHECK19-NEXT: [[TMP110:%.*]] = bitcast i8** [[TMP109]] to [5 x [10 x double]]** +// CHECK19-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP110]], align 4 +// CHECK19-NEXT: [[TMP111:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 4 +// CHECK19-NEXT: store i8* null, i8** [[TMP111]], align 4 +// CHECK19-NEXT: [[TMP112:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 5 +// CHECK19-NEXT: [[TMP113:%.*]] = bitcast i8** [[TMP112]] to i32* +// CHECK19-NEXT: store i32 5, i32* [[TMP113]], align 4 +// CHECK19-NEXT: [[TMP114:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 5 +// CHECK19-NEXT: [[TMP115:%.*]] = bitcast i8** [[TMP114]] to i32* +// CHECK19-NEXT: store i32 5, i32* [[TMP115]], align 4 +// CHECK19-NEXT: [[TMP116:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 5 // CHECK19-NEXT: store i8* null, i8** [[TMP116]], align 4 -// CHECK19-NEXT: [[TMP117:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 5 +// CHECK19-NEXT: [[TMP117:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 6 // CHECK19-NEXT: [[TMP118:%.*]] = bitcast i8** [[TMP117]] to i32* -// CHECK19-NEXT: store i32 5, i32* [[TMP118]], align 4 -// CHECK19-NEXT: [[TMP119:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 5 +// CHECK19-NEXT: store i32 [[TMP3]], i32* [[TMP118]], align 4 +// CHECK19-NEXT: [[TMP119:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 6 // CHECK19-NEXT: [[TMP120:%.*]] = bitcast i8** [[TMP119]] to i32* -// CHECK19-NEXT: store i32 5, i32* [[TMP120]], align 4 -// CHECK19-NEXT: [[TMP121:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5 -// CHECK19-NEXT: store i64 4, i64* [[TMP121]], align 4 -// CHECK19-NEXT: [[TMP122:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 5 -// CHECK19-NEXT: store i8* null, i8** [[TMP122]], align 4 -// CHECK19-NEXT: [[TMP123:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 6 -// CHECK19-NEXT: [[TMP124:%.*]] = bitcast i8** [[TMP123]] to i32* -// CHECK19-NEXT: store i32 [[TMP3]], i32* [[TMP124]], align 4 -// CHECK19-NEXT: [[TMP125:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 6 -// CHECK19-NEXT: [[TMP126:%.*]] = bitcast i8** [[TMP125]] to i32* 
-// CHECK19-NEXT: store i32 [[TMP3]], i32* [[TMP126]], align 4 -// CHECK19-NEXT: [[TMP127:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6 -// CHECK19-NEXT: store i64 4, i64* [[TMP127]], align 4 -// CHECK19-NEXT: [[TMP128:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 6 -// CHECK19-NEXT: store i8* null, i8** [[TMP128]], align 4 -// CHECK19-NEXT: [[TMP129:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 7 -// CHECK19-NEXT: [[TMP130:%.*]] = bitcast i8** [[TMP129]] to double** -// CHECK19-NEXT: store double* [[VLA1]], double** [[TMP130]], align 4 -// CHECK19-NEXT: [[TMP131:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 7 -// CHECK19-NEXT: [[TMP132:%.*]] = bitcast i8** [[TMP131]] to double** -// CHECK19-NEXT: store double* [[VLA1]], double** [[TMP132]], align 4 -// CHECK19-NEXT: [[TMP133:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7 -// CHECK19-NEXT: store i64 [[TMP86]], i64* [[TMP133]], align 4 -// CHECK19-NEXT: [[TMP134:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 7 -// CHECK19-NEXT: store i8* null, i8** [[TMP134]], align 4 -// CHECK19-NEXT: [[TMP135:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 8 -// CHECK19-NEXT: [[TMP136:%.*]] = bitcast i8** [[TMP135]] to %struct.TT** -// CHECK19-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP136]], align 4 -// CHECK19-NEXT: [[TMP137:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 8 -// CHECK19-NEXT: [[TMP138:%.*]] = bitcast i8** [[TMP137]] to %struct.TT** -// CHECK19-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP138]], align 4 -// CHECK19-NEXT: [[TMP139:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 8 -// CHECK19-NEXT: store i64 12, i64* [[TMP139]], align 4 -// CHECK19-NEXT: [[TMP140:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 8 -// CHECK19-NEXT: store i8* null, i8** [[TMP140]], align 4 -// CHECK19-NEXT: [[TMP141:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 9 -// CHECK19-NEXT: [[TMP142:%.*]] = bitcast i8** [[TMP141]] to i32* -// CHECK19-NEXT: store i32 [[TMP80]], i32* [[TMP142]], align 4 -// CHECK19-NEXT: [[TMP143:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 9 -// CHECK19-NEXT: [[TMP144:%.*]] = bitcast i8** [[TMP143]] to i32* -// CHECK19-NEXT: store i32 [[TMP80]], i32* [[TMP144]], align 4 -// CHECK19-NEXT: [[TMP145:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 9 -// CHECK19-NEXT: store i64 4, i64* [[TMP145]], align 4 -// CHECK19-NEXT: [[TMP146:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 9 -// CHECK19-NEXT: store i8* null, i8** [[TMP146]], align 4 -// CHECK19-NEXT: [[TMP147:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP148:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP149:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP150:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* 
@.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l170.region_id, i32 10, i8** [[TMP147]], i8** [[TMP148]], i64* [[TMP149]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_maptypes.8, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) -// CHECK19-NEXT: [[TMP151:%.*]] = icmp ne i32 [[TMP150]], 0 -// CHECK19-NEXT: br i1 [[TMP151]], label [[OMP_OFFLOAD_FAILED17:%.*]], label [[OMP_OFFLOAD_CONT18:%.*]] +// CHECK19-NEXT: store i32 [[TMP3]], i32* [[TMP120]], align 4 +// CHECK19-NEXT: [[TMP121:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 6 +// CHECK19-NEXT: store i8* null, i8** [[TMP121]], align 4 +// CHECK19-NEXT: [[TMP122:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 7 +// CHECK19-NEXT: [[TMP123:%.*]] = bitcast i8** [[TMP122]] to double** +// CHECK19-NEXT: store double* [[VLA1]], double** [[TMP123]], align 4 +// CHECK19-NEXT: [[TMP124:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 7 +// CHECK19-NEXT: [[TMP125:%.*]] = bitcast i8** [[TMP124]] to double** +// CHECK19-NEXT: store double* [[VLA1]], double** [[TMP125]], align 4 +// CHECK19-NEXT: store i64 [[TMP86]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_sizes.8, i32 0, i32 7), align 4 +// CHECK19-NEXT: [[TMP126:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 7 +// CHECK19-NEXT: store i8* null, i8** [[TMP126]], align 4 +// CHECK19-NEXT: [[TMP127:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 8 +// CHECK19-NEXT: [[TMP128:%.*]] = bitcast i8** [[TMP127]] to %struct.TT** +// CHECK19-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP128]], align 4 +// CHECK19-NEXT: [[TMP129:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 8 +// CHECK19-NEXT: [[TMP130:%.*]] = bitcast i8** [[TMP129]] to %struct.TT** +// CHECK19-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP130]], align 4 +// CHECK19-NEXT: [[TMP131:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 8 +// CHECK19-NEXT: store i8* null, i8** [[TMP131]], align 4 +// CHECK19-NEXT: [[TMP132:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 9 +// CHECK19-NEXT: [[TMP133:%.*]] = bitcast i8** [[TMP132]] to i32* +// CHECK19-NEXT: store i32 [[TMP80]], i32* [[TMP133]], align 4 +// CHECK19-NEXT: [[TMP134:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 9 +// CHECK19-NEXT: [[TMP135:%.*]] = bitcast i8** [[TMP134]] to i32* +// CHECK19-NEXT: store i32 [[TMP80]], i32* [[TMP135]], align 4 +// CHECK19-NEXT: [[TMP136:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 9 +// CHECK19-NEXT: store i8* null, i8** [[TMP136]], align 4 +// CHECK19-NEXT: [[TMP137:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP138:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP139:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l170.region_id, i32 10, i8** [[TMP137]], i8** [[TMP138]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, 
i8** null, i32 1, i32 0) +// CHECK19-NEXT: [[TMP140:%.*]] = icmp ne i32 [[TMP139]], 0 +// CHECK19-NEXT: br i1 [[TMP140]], label [[OMP_OFFLOAD_FAILED17:%.*]], label [[OMP_OFFLOAD_CONT18:%.*]] // CHECK19: omp_offload.failed17: // CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l170(i32 [[TMP78]], [10 x float]* [[B]], i32 [[TMP1]], float* [[VLA]], [5 x [10 x double]]* [[C]], i32 5, i32 [[TMP3]], double* [[VLA1]], %struct.TT* [[D]], i32 [[TMP80]]) #[[ATTR4]] // CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT18]] @@ -13533,10 +13327,10 @@ // CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l170(i32 [[TMP78]], [10 x float]* [[B]], i32 [[TMP1]], float* [[VLA]], [5 x [10 x double]]* [[C]], i32 5, i32 [[TMP3]], double* [[VLA1]], %struct.TT* [[D]], i32 [[TMP80]]) #[[ATTR4]] // CHECK19-NEXT: br label [[OMP_IF_END20]] // CHECK19: omp_if.end20: -// CHECK19-NEXT: [[TMP152:%.*]] = load i32, i32* [[A]], align 4 -// CHECK19-NEXT: [[TMP153:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK19-NEXT: call void @llvm.stackrestore(i8* [[TMP153]]) -// CHECK19-NEXT: ret i32 [[TMP152]] +// CHECK19-NEXT: [[TMP141:%.*]] = load i32, i32* [[A]], align 4 +// CHECK19-NEXT: [[TMP142:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK19-NEXT: call void @llvm.stackrestore(i8* [[TMP142]]) +// CHECK19-NEXT: ret i32 [[TMP141]] // // // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l103 @@ -14287,7 +14081,6 @@ // CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 4 // CHECK19-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 4 // CHECK19-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 4 -// CHECK19-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 4 // CHECK19-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 // CHECK19-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 // CHECK19-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 @@ -14317,56 +14110,46 @@ // CHECK19-NEXT: [[TMP12:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK19-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to double** // CHECK19-NEXT: store double* [[A]], double** [[TMP13]], align 4 -// CHECK19-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK19-NEXT: store i64 8, i64* [[TMP14]], align 4 -// CHECK19-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK19-NEXT: store i8* null, i8** [[TMP15]], align 4 -// CHECK19-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK19-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32* -// CHECK19-NEXT: store i32 [[TMP5]], i32* [[TMP17]], align 4 -// CHECK19-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK19-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32* -// CHECK19-NEXT: store i32 [[TMP5]], i32* [[TMP19]], align 4 -// CHECK19-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK19-NEXT: store i64 4, i64* [[TMP20]], align 4 -// CHECK19-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK19-NEXT: store i8* null, i8** [[TMP21]], align 4 -// CHECK19-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, 
i32 2 +// CHECK19-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK19-NEXT: store i8* null, i8** [[TMP14]], align 4 +// CHECK19-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK19-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i32* +// CHECK19-NEXT: store i32 [[TMP5]], i32* [[TMP16]], align 4 +// CHECK19-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK19-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32* +// CHECK19-NEXT: store i32 [[TMP5]], i32* [[TMP18]], align 4 +// CHECK19-NEXT: [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK19-NEXT: store i8* null, i8** [[TMP19]], align 4 +// CHECK19-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK19-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32* +// CHECK19-NEXT: store i32 2, i32* [[TMP21]], align 4 +// CHECK19-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK19-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i32* // CHECK19-NEXT: store i32 2, i32* [[TMP23]], align 4 -// CHECK19-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK19-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i32* -// CHECK19-NEXT: store i32 2, i32* [[TMP25]], align 4 -// CHECK19-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK19-NEXT: store i64 4, i64* [[TMP26]], align 4 -// CHECK19-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK19-NEXT: store i8* null, i8** [[TMP27]], align 4 -// CHECK19-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK19-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i32* -// CHECK19-NEXT: store i32 [[TMP1]], i32* [[TMP29]], align 4 -// CHECK19-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK19-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i32* -// CHECK19-NEXT: store i32 [[TMP1]], i32* [[TMP31]], align 4 -// CHECK19-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK19-NEXT: store i64 4, i64* [[TMP32]], align 4 -// CHECK19-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 -// CHECK19-NEXT: store i8* null, i8** [[TMP33]], align 4 -// CHECK19-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK19-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i16** -// CHECK19-NEXT: store i16* [[VLA]], i16** [[TMP35]], align 4 -// CHECK19-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK19-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i16** -// CHECK19-NEXT: store i16* [[VLA]], i16** [[TMP37]], align 4 -// CHECK19-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK19-NEXT: store i64 [[TMP9]], i64* [[TMP38]], align 4 -// CHECK19-NEXT: [[TMP39:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 -// CHECK19-NEXT: store i8* null, i8** [[TMP39]], align 4 -// 
CHECK19-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l242.region_id, i32 5, i8** [[TMP40]], i8** [[TMP41]], i64* [[TMP42]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) -// CHECK19-NEXT: [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0 -// CHECK19-NEXT: br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK19-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK19-NEXT: store i8* null, i8** [[TMP24]], align 4 +// CHECK19-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK19-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i32* +// CHECK19-NEXT: store i32 [[TMP1]], i32* [[TMP26]], align 4 +// CHECK19-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK19-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i32* +// CHECK19-NEXT: store i32 [[TMP1]], i32* [[TMP28]], align 4 +// CHECK19-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 +// CHECK19-NEXT: store i8* null, i8** [[TMP29]], align 4 +// CHECK19-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK19-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i16** +// CHECK19-NEXT: store i16* [[VLA]], i16** [[TMP31]], align 4 +// CHECK19-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK19-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i16** +// CHECK19-NEXT: store i16* [[VLA]], i16** [[TMP33]], align 4 +// CHECK19-NEXT: store i64 [[TMP9]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.11, i32 0, i32 4), align 4 +// CHECK19-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 +// CHECK19-NEXT: store i8* null, i8** [[TMP34]], align 4 +// CHECK19-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP37:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l242.region_id, i32 5, i8** [[TMP35]], i8** [[TMP36]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.11, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) +// CHECK19-NEXT: [[TMP38:%.*]] = icmp ne i32 [[TMP37]], 0 +// CHECK19-NEXT: br i1 [[TMP38]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK19: omp_offload.failed: // CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l242(%struct.S1* [[THIS1]], i32 [[TMP5]], i32 2, i32 [[TMP1]], i16* [[VLA]]) #[[ATTR4]] // CHECK19-NEXT: br label 
[[OMP_OFFLOAD_CONT]] @@ -14376,15 +14159,15 @@ // CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l242(%struct.S1* [[THIS1]], i32 [[TMP5]], i32 2, i32 [[TMP1]], i16* [[VLA]]) #[[ATTR4]] // CHECK19-NEXT: br label [[OMP_IF_END]] // CHECK19: omp_if.end: -// CHECK19-NEXT: [[TMP45:%.*]] = mul nsw i32 1, [[TMP1]] -// CHECK19-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP45]] +// CHECK19-NEXT: [[TMP39:%.*]] = mul nsw i32 1, [[TMP1]] +// CHECK19-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP39]] // CHECK19-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1 -// CHECK19-NEXT: [[TMP46:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2 -// CHECK19-NEXT: [[CONV:%.*]] = sext i16 [[TMP46]] to i32 -// CHECK19-NEXT: [[TMP47:%.*]] = load i32, i32* [[B]], align 4 -// CHECK19-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[TMP47]] -// CHECK19-NEXT: [[TMP48:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK19-NEXT: call void @llvm.stackrestore(i8* [[TMP48]]) +// CHECK19-NEXT: [[TMP40:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2 +// CHECK19-NEXT: [[CONV:%.*]] = sext i16 [[TMP40]] to i32 +// CHECK19-NEXT: [[TMP41:%.*]] = load i32, i32* [[B]], align 4 +// CHECK19-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[TMP41]] +// CHECK19-NEXT: [[TMP42:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK19-NEXT: call void @llvm.stackrestore(i8* [[TMP42]]) // CHECK19-NEXT: ret i32 [[ADD3]] // // @@ -14455,7 +14238,7 @@ // CHECK19-NEXT: store i8* null, i8** [[TMP26]], align 4 // CHECK19-NEXT: [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK19-NEXT: [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l224.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) +// CHECK19-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l224.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.14, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.15, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) // CHECK19-NEXT: [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0 // CHECK19-NEXT: br i1 [[TMP30]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK19: omp_offload.failed: @@ -14523,7 +14306,7 @@ // CHECK19-NEXT: store i8* null, i8** [[TMP19]], align 4 // CHECK19-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK19-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l207.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.15, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* 
@.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) +// CHECK19-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l207.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.17, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.18, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) // CHECK19-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0 // CHECK19-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK19: omp_offload.failed: @@ -14560,11 +14343,11 @@ // CHECK19-NEXT: [[TMP4:%.*]] = load i32, i32* [[B_ADDR]], align 4 // CHECK19-NEXT: store i32 [[TMP4]], i32* [[B_CASTED]], align 4 // CHECK19-NEXT: [[TMP5:%.*]] = load i32, i32* [[B_CASTED]], align 4 -// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], i16* [[TMP3]]) +// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], i16* [[TMP3]]) // CHECK19-NEXT: ret void // // -// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..9 +// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..10 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i32 noundef [[B:%.*]], i32 noundef [[VLA:%.*]], i32 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR3]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -14680,11 +14463,11 @@ // CHECK19-NEXT: [[CONV3:%.*]] = bitcast i32* [[AAA_CASTED]] to i8* // CHECK19-NEXT: store i8 [[TMP5]], i8* [[CONV3]], align 1 // CHECK19-NEXT: [[TMP6:%.*]] = load i32, i32* [[AAA_CASTED]], align 4 -// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, [10 x i32]*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], [10 x i32]* [[TMP0]]) +// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, [10 x i32]*)* @.omp_outlined..13 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], [10 x i32]* [[TMP0]]) // CHECK19-NEXT: ret void // // -// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..13 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], i32 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -14727,11 +14510,11 @@ // CHECK19-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16* // CHECK19-NEXT: store i16 [[TMP3]], i16* [[CONV1]], align 2 // CHECK19-NEXT: [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4 -// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]]) +// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]]) // CHECK19-NEXT: ret void // // -// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..14 +// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..16 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -14857,7 +14640,6 @@ // CHECK20-NEXT: [[DOTOFFLOAD_BASEPTRS14:%.*]] = alloca [10 x i8*], align 4 // CHECK20-NEXT: [[DOTOFFLOAD_PTRS15:%.*]] = alloca [10 x i8*], align 4 // CHECK20-NEXT: [[DOTOFFLOAD_MAPPERS16:%.*]] = alloca [10 x i8*], align 4 -// CHECK20-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [10 x i64], align 4 // CHECK20-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]]) // CHECK20-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 // CHECK20-NEXT: store i32 0, i32* [[A]], align 4 @@ -15018,106 +14800,87 @@ // CHECK20-NEXT: [[TMP89:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 0 // CHECK20-NEXT: [[TMP90:%.*]] = bitcast i8** [[TMP89]] to i32* // CHECK20-NEXT: store i32 [[TMP78]], i32* [[TMP90]], align 4 -// CHECK20-NEXT: [[TMP91:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK20-NEXT: store i64 4, i64* [[TMP91]], align 4 -// CHECK20-NEXT: [[TMP92:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 0 -// CHECK20-NEXT: store i8* null, i8** [[TMP92]], align 4 -// CHECK20-NEXT: [[TMP93:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 1 -// CHECK20-NEXT: [[TMP94:%.*]] = bitcast i8** [[TMP93]] to [10 x float]** -// CHECK20-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP94]], align 4 -// CHECK20-NEXT: [[TMP95:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* 
[[DOTOFFLOAD_PTRS15]], i32 0, i32 1 -// CHECK20-NEXT: [[TMP96:%.*]] = bitcast i8** [[TMP95]] to [10 x float]** -// CHECK20-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP96]], align 4 -// CHECK20-NEXT: [[TMP97:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK20-NEXT: store i64 40, i64* [[TMP97]], align 4 -// CHECK20-NEXT: [[TMP98:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 1 -// CHECK20-NEXT: store i8* null, i8** [[TMP98]], align 4 -// CHECK20-NEXT: [[TMP99:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 2 +// CHECK20-NEXT: [[TMP91:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 0 +// CHECK20-NEXT: store i8* null, i8** [[TMP91]], align 4 +// CHECK20-NEXT: [[TMP92:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 1 +// CHECK20-NEXT: [[TMP93:%.*]] = bitcast i8** [[TMP92]] to [10 x float]** +// CHECK20-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP93]], align 4 +// CHECK20-NEXT: [[TMP94:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 1 +// CHECK20-NEXT: [[TMP95:%.*]] = bitcast i8** [[TMP94]] to [10 x float]** +// CHECK20-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP95]], align 4 +// CHECK20-NEXT: [[TMP96:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 1 +// CHECK20-NEXT: store i8* null, i8** [[TMP96]], align 4 +// CHECK20-NEXT: [[TMP97:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 2 +// CHECK20-NEXT: [[TMP98:%.*]] = bitcast i8** [[TMP97]] to i32* +// CHECK20-NEXT: store i32 [[TMP1]], i32* [[TMP98]], align 4 +// CHECK20-NEXT: [[TMP99:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 2 // CHECK20-NEXT: [[TMP100:%.*]] = bitcast i8** [[TMP99]] to i32* // CHECK20-NEXT: store i32 [[TMP1]], i32* [[TMP100]], align 4 -// CHECK20-NEXT: [[TMP101:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 2 -// CHECK20-NEXT: [[TMP102:%.*]] = bitcast i8** [[TMP101]] to i32* -// CHECK20-NEXT: store i32 [[TMP1]], i32* [[TMP102]], align 4 -// CHECK20-NEXT: [[TMP103:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK20-NEXT: store i64 4, i64* [[TMP103]], align 4 -// CHECK20-NEXT: [[TMP104:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 2 -// CHECK20-NEXT: store i8* null, i8** [[TMP104]], align 4 -// CHECK20-NEXT: [[TMP105:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 3 -// CHECK20-NEXT: [[TMP106:%.*]] = bitcast i8** [[TMP105]] to float** -// CHECK20-NEXT: store float* [[VLA]], float** [[TMP106]], align 4 -// CHECK20-NEXT: [[TMP107:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 3 -// CHECK20-NEXT: [[TMP108:%.*]] = bitcast i8** [[TMP107]] to float** -// CHECK20-NEXT: store float* [[VLA]], float** [[TMP108]], align 4 -// CHECK20-NEXT: [[TMP109:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK20-NEXT: store i64 [[TMP83]], i64* [[TMP109]], align 4 -// CHECK20-NEXT: [[TMP110:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 3 -// CHECK20-NEXT: store i8* null, i8** [[TMP110]], align 4 -// CHECK20-NEXT: 
[[TMP111:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 4 -// CHECK20-NEXT: [[TMP112:%.*]] = bitcast i8** [[TMP111]] to [5 x [10 x double]]** -// CHECK20-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP112]], align 4 -// CHECK20-NEXT: [[TMP113:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 4 -// CHECK20-NEXT: [[TMP114:%.*]] = bitcast i8** [[TMP113]] to [5 x [10 x double]]** -// CHECK20-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP114]], align 4 -// CHECK20-NEXT: [[TMP115:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK20-NEXT: store i64 400, i64* [[TMP115]], align 4 -// CHECK20-NEXT: [[TMP116:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 4 +// CHECK20-NEXT: [[TMP101:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 2 +// CHECK20-NEXT: store i8* null, i8** [[TMP101]], align 4 +// CHECK20-NEXT: [[TMP102:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 3 +// CHECK20-NEXT: [[TMP103:%.*]] = bitcast i8** [[TMP102]] to float** +// CHECK20-NEXT: store float* [[VLA]], float** [[TMP103]], align 4 +// CHECK20-NEXT: [[TMP104:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 3 +// CHECK20-NEXT: [[TMP105:%.*]] = bitcast i8** [[TMP104]] to float** +// CHECK20-NEXT: store float* [[VLA]], float** [[TMP105]], align 4 +// CHECK20-NEXT: store i64 [[TMP83]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_sizes.8, i32 0, i32 3), align 4 +// CHECK20-NEXT: [[TMP106:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 3 +// CHECK20-NEXT: store i8* null, i8** [[TMP106]], align 4 +// CHECK20-NEXT: [[TMP107:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 4 +// CHECK20-NEXT: [[TMP108:%.*]] = bitcast i8** [[TMP107]] to [5 x [10 x double]]** +// CHECK20-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP108]], align 4 +// CHECK20-NEXT: [[TMP109:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 4 +// CHECK20-NEXT: [[TMP110:%.*]] = bitcast i8** [[TMP109]] to [5 x [10 x double]]** +// CHECK20-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP110]], align 4 +// CHECK20-NEXT: [[TMP111:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 4 +// CHECK20-NEXT: store i8* null, i8** [[TMP111]], align 4 +// CHECK20-NEXT: [[TMP112:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 5 +// CHECK20-NEXT: [[TMP113:%.*]] = bitcast i8** [[TMP112]] to i32* +// CHECK20-NEXT: store i32 5, i32* [[TMP113]], align 4 +// CHECK20-NEXT: [[TMP114:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 5 +// CHECK20-NEXT: [[TMP115:%.*]] = bitcast i8** [[TMP114]] to i32* +// CHECK20-NEXT: store i32 5, i32* [[TMP115]], align 4 +// CHECK20-NEXT: [[TMP116:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 5 // CHECK20-NEXT: store i8* null, i8** [[TMP116]], align 4 -// CHECK20-NEXT: [[TMP117:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 5 +// CHECK20-NEXT: [[TMP117:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* 
[[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 6 // CHECK20-NEXT: [[TMP118:%.*]] = bitcast i8** [[TMP117]] to i32* -// CHECK20-NEXT: store i32 5, i32* [[TMP118]], align 4 -// CHECK20-NEXT: [[TMP119:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 5 +// CHECK20-NEXT: store i32 [[TMP3]], i32* [[TMP118]], align 4 +// CHECK20-NEXT: [[TMP119:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 6 // CHECK20-NEXT: [[TMP120:%.*]] = bitcast i8** [[TMP119]] to i32* -// CHECK20-NEXT: store i32 5, i32* [[TMP120]], align 4 -// CHECK20-NEXT: [[TMP121:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5 -// CHECK20-NEXT: store i64 4, i64* [[TMP121]], align 4 -// CHECK20-NEXT: [[TMP122:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 5 -// CHECK20-NEXT: store i8* null, i8** [[TMP122]], align 4 -// CHECK20-NEXT: [[TMP123:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 6 -// CHECK20-NEXT: [[TMP124:%.*]] = bitcast i8** [[TMP123]] to i32* -// CHECK20-NEXT: store i32 [[TMP3]], i32* [[TMP124]], align 4 -// CHECK20-NEXT: [[TMP125:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 6 -// CHECK20-NEXT: [[TMP126:%.*]] = bitcast i8** [[TMP125]] to i32* -// CHECK20-NEXT: store i32 [[TMP3]], i32* [[TMP126]], align 4 -// CHECK20-NEXT: [[TMP127:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6 -// CHECK20-NEXT: store i64 4, i64* [[TMP127]], align 4 -// CHECK20-NEXT: [[TMP128:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 6 -// CHECK20-NEXT: store i8* null, i8** [[TMP128]], align 4 -// CHECK20-NEXT: [[TMP129:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 7 -// CHECK20-NEXT: [[TMP130:%.*]] = bitcast i8** [[TMP129]] to double** -// CHECK20-NEXT: store double* [[VLA1]], double** [[TMP130]], align 4 -// CHECK20-NEXT: [[TMP131:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 7 -// CHECK20-NEXT: [[TMP132:%.*]] = bitcast i8** [[TMP131]] to double** -// CHECK20-NEXT: store double* [[VLA1]], double** [[TMP132]], align 4 -// CHECK20-NEXT: [[TMP133:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7 -// CHECK20-NEXT: store i64 [[TMP86]], i64* [[TMP133]], align 4 -// CHECK20-NEXT: [[TMP134:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 7 -// CHECK20-NEXT: store i8* null, i8** [[TMP134]], align 4 -// CHECK20-NEXT: [[TMP135:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 8 -// CHECK20-NEXT: [[TMP136:%.*]] = bitcast i8** [[TMP135]] to %struct.TT** -// CHECK20-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP136]], align 4 -// CHECK20-NEXT: [[TMP137:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 8 -// CHECK20-NEXT: [[TMP138:%.*]] = bitcast i8** [[TMP137]] to %struct.TT** -// CHECK20-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP138]], align 4 -// CHECK20-NEXT: [[TMP139:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 8 -// CHECK20-NEXT: store i64 12, i64* [[TMP139]], align 4 -// CHECK20-NEXT: [[TMP140:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 8 -// CHECK20-NEXT: store i8* null, i8** 
[[TMP140]], align 4 -// CHECK20-NEXT: [[TMP141:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 9 -// CHECK20-NEXT: [[TMP142:%.*]] = bitcast i8** [[TMP141]] to i32* -// CHECK20-NEXT: store i32 [[TMP80]], i32* [[TMP142]], align 4 -// CHECK20-NEXT: [[TMP143:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 9 -// CHECK20-NEXT: [[TMP144:%.*]] = bitcast i8** [[TMP143]] to i32* -// CHECK20-NEXT: store i32 [[TMP80]], i32* [[TMP144]], align 4 -// CHECK20-NEXT: [[TMP145:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 9 -// CHECK20-NEXT: store i64 4, i64* [[TMP145]], align 4 -// CHECK20-NEXT: [[TMP146:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 9 -// CHECK20-NEXT: store i8* null, i8** [[TMP146]], align 4 -// CHECK20-NEXT: [[TMP147:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP148:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP149:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP150:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l170.region_id, i32 10, i8** [[TMP147]], i8** [[TMP148]], i64* [[TMP149]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_maptypes.8, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) -// CHECK20-NEXT: [[TMP151:%.*]] = icmp ne i32 [[TMP150]], 0 -// CHECK20-NEXT: br i1 [[TMP151]], label [[OMP_OFFLOAD_FAILED17:%.*]], label [[OMP_OFFLOAD_CONT18:%.*]] +// CHECK20-NEXT: store i32 [[TMP3]], i32* [[TMP120]], align 4 +// CHECK20-NEXT: [[TMP121:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 6 +// CHECK20-NEXT: store i8* null, i8** [[TMP121]], align 4 +// CHECK20-NEXT: [[TMP122:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 7 +// CHECK20-NEXT: [[TMP123:%.*]] = bitcast i8** [[TMP122]] to double** +// CHECK20-NEXT: store double* [[VLA1]], double** [[TMP123]], align 4 +// CHECK20-NEXT: [[TMP124:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 7 +// CHECK20-NEXT: [[TMP125:%.*]] = bitcast i8** [[TMP124]] to double** +// CHECK20-NEXT: store double* [[VLA1]], double** [[TMP125]], align 4 +// CHECK20-NEXT: store i64 [[TMP86]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_sizes.8, i32 0, i32 7), align 4 +// CHECK20-NEXT: [[TMP126:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 7 +// CHECK20-NEXT: store i8* null, i8** [[TMP126]], align 4 +// CHECK20-NEXT: [[TMP127:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 8 +// CHECK20-NEXT: [[TMP128:%.*]] = bitcast i8** [[TMP127]] to %struct.TT** +// CHECK20-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP128]], align 4 +// CHECK20-NEXT: [[TMP129:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 8 +// CHECK20-NEXT: [[TMP130:%.*]] = bitcast i8** [[TMP129]] to %struct.TT** +// CHECK20-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP130]], align 4 +// CHECK20-NEXT: [[TMP131:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 8 +// CHECK20-NEXT: store i8* null, i8** 
[[TMP131]], align 4 +// CHECK20-NEXT: [[TMP132:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 9 +// CHECK20-NEXT: [[TMP133:%.*]] = bitcast i8** [[TMP132]] to i32* +// CHECK20-NEXT: store i32 [[TMP80]], i32* [[TMP133]], align 4 +// CHECK20-NEXT: [[TMP134:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 9 +// CHECK20-NEXT: [[TMP135:%.*]] = bitcast i8** [[TMP134]] to i32* +// CHECK20-NEXT: store i32 [[TMP80]], i32* [[TMP135]], align 4 +// CHECK20-NEXT: [[TMP136:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 9 +// CHECK20-NEXT: store i8* null, i8** [[TMP136]], align 4 +// CHECK20-NEXT: [[TMP137:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP138:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP139:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l170.region_id, i32 10, i8** [[TMP137]], i8** [[TMP138]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) +// CHECK20-NEXT: [[TMP140:%.*]] = icmp ne i32 [[TMP139]], 0 +// CHECK20-NEXT: br i1 [[TMP140]], label [[OMP_OFFLOAD_FAILED17:%.*]], label [[OMP_OFFLOAD_CONT18:%.*]] // CHECK20: omp_offload.failed17: // CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l170(i32 [[TMP78]], [10 x float]* [[B]], i32 [[TMP1]], float* [[VLA]], [5 x [10 x double]]* [[C]], i32 5, i32 [[TMP3]], double* [[VLA1]], %struct.TT* [[D]], i32 [[TMP80]]) #[[ATTR4]] // CHECK20-NEXT: br label [[OMP_OFFLOAD_CONT18]] @@ -15127,10 +14890,10 @@ // CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l170(i32 [[TMP78]], [10 x float]* [[B]], i32 [[TMP1]], float* [[VLA]], [5 x [10 x double]]* [[C]], i32 5, i32 [[TMP3]], double* [[VLA1]], %struct.TT* [[D]], i32 [[TMP80]]) #[[ATTR4]] // CHECK20-NEXT: br label [[OMP_IF_END20]] // CHECK20: omp_if.end20: -// CHECK20-NEXT: [[TMP152:%.*]] = load i32, i32* [[A]], align 4 -// CHECK20-NEXT: [[TMP153:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK20-NEXT: call void @llvm.stackrestore(i8* [[TMP153]]) -// CHECK20-NEXT: ret i32 [[TMP152]] +// CHECK20-NEXT: [[TMP141:%.*]] = load i32, i32* [[A]], align 4 +// CHECK20-NEXT: [[TMP142:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK20-NEXT: call void @llvm.stackrestore(i8* [[TMP142]]) +// CHECK20-NEXT: ret i32 [[TMP141]] // // // CHECK20-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l103 @@ -15881,7 +15644,6 @@ // CHECK20-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 4 // CHECK20-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 4 // CHECK20-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 4 -// CHECK20-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 4 // CHECK20-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 // CHECK20-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 // CHECK20-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 @@ -15911,56 +15673,46 @@ // CHECK20-NEXT: [[TMP12:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK20-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to 
double** // CHECK20-NEXT: store double* [[A]], double** [[TMP13]], align 4 -// CHECK20-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK20-NEXT: store i64 8, i64* [[TMP14]], align 4 -// CHECK20-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK20-NEXT: store i8* null, i8** [[TMP15]], align 4 -// CHECK20-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK20-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32* -// CHECK20-NEXT: store i32 [[TMP5]], i32* [[TMP17]], align 4 -// CHECK20-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK20-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32* -// CHECK20-NEXT: store i32 [[TMP5]], i32* [[TMP19]], align 4 -// CHECK20-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK20-NEXT: store i64 4, i64* [[TMP20]], align 4 -// CHECK20-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK20-NEXT: store i8* null, i8** [[TMP21]], align 4 -// CHECK20-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK20-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK20-NEXT: store i8* null, i8** [[TMP14]], align 4 +// CHECK20-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK20-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i32* +// CHECK20-NEXT: store i32 [[TMP5]], i32* [[TMP16]], align 4 +// CHECK20-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK20-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32* +// CHECK20-NEXT: store i32 [[TMP5]], i32* [[TMP18]], align 4 +// CHECK20-NEXT: [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK20-NEXT: store i8* null, i8** [[TMP19]], align 4 +// CHECK20-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK20-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32* +// CHECK20-NEXT: store i32 2, i32* [[TMP21]], align 4 +// CHECK20-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK20-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i32* // CHECK20-NEXT: store i32 2, i32* [[TMP23]], align 4 -// CHECK20-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK20-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i32* -// CHECK20-NEXT: store i32 2, i32* [[TMP25]], align 4 -// CHECK20-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK20-NEXT: store i64 4, i64* [[TMP26]], align 4 -// CHECK20-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK20-NEXT: store i8* null, i8** [[TMP27]], align 4 -// CHECK20-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK20-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i32* -// CHECK20-NEXT: store i32 [[TMP1]], i32* [[TMP29]], align 4 -// CHECK20-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x 
i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK20-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i32* -// CHECK20-NEXT: store i32 [[TMP1]], i32* [[TMP31]], align 4 -// CHECK20-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK20-NEXT: store i64 4, i64* [[TMP32]], align 4 -// CHECK20-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 -// CHECK20-NEXT: store i8* null, i8** [[TMP33]], align 4 -// CHECK20-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK20-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i16** -// CHECK20-NEXT: store i16* [[VLA]], i16** [[TMP35]], align 4 -// CHECK20-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK20-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i16** -// CHECK20-NEXT: store i16* [[VLA]], i16** [[TMP37]], align 4 -// CHECK20-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK20-NEXT: store i64 [[TMP9]], i64* [[TMP38]], align 4 -// CHECK20-NEXT: [[TMP39:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 -// CHECK20-NEXT: store i8* null, i8** [[TMP39]], align 4 -// CHECK20-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l242.region_id, i32 5, i8** [[TMP40]], i8** [[TMP41]], i64* [[TMP42]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) -// CHECK20-NEXT: [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0 -// CHECK20-NEXT: br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK20-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK20-NEXT: store i8* null, i8** [[TMP24]], align 4 +// CHECK20-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK20-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i32* +// CHECK20-NEXT: store i32 [[TMP1]], i32* [[TMP26]], align 4 +// CHECK20-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK20-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i32* +// CHECK20-NEXT: store i32 [[TMP1]], i32* [[TMP28]], align 4 +// CHECK20-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 +// CHECK20-NEXT: store i8* null, i8** [[TMP29]], align 4 +// CHECK20-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK20-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i16** +// CHECK20-NEXT: store i16* [[VLA]], i16** [[TMP31]], align 4 +// CHECK20-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK20-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i16** +// CHECK20-NEXT: store i16* [[VLA]], i16** [[TMP33]], align 4 +// CHECK20-NEXT: 
store i64 [[TMP9]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.11, i32 0, i32 4), align 4 +// CHECK20-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 +// CHECK20-NEXT: store i8* null, i8** [[TMP34]], align 4 +// CHECK20-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP37:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l242.region_id, i32 5, i8** [[TMP35]], i8** [[TMP36]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.11, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) +// CHECK20-NEXT: [[TMP38:%.*]] = icmp ne i32 [[TMP37]], 0 +// CHECK20-NEXT: br i1 [[TMP38]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK20: omp_offload.failed: // CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l242(%struct.S1* [[THIS1]], i32 [[TMP5]], i32 2, i32 [[TMP1]], i16* [[VLA]]) #[[ATTR4]] // CHECK20-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -15970,15 +15722,15 @@ // CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l242(%struct.S1* [[THIS1]], i32 [[TMP5]], i32 2, i32 [[TMP1]], i16* [[VLA]]) #[[ATTR4]] // CHECK20-NEXT: br label [[OMP_IF_END]] // CHECK20: omp_if.end: -// CHECK20-NEXT: [[TMP45:%.*]] = mul nsw i32 1, [[TMP1]] -// CHECK20-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP45]] +// CHECK20-NEXT: [[TMP39:%.*]] = mul nsw i32 1, [[TMP1]] +// CHECK20-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP39]] // CHECK20-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1 -// CHECK20-NEXT: [[TMP46:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2 -// CHECK20-NEXT: [[CONV:%.*]] = sext i16 [[TMP46]] to i32 -// CHECK20-NEXT: [[TMP47:%.*]] = load i32, i32* [[B]], align 4 -// CHECK20-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[TMP47]] -// CHECK20-NEXT: [[TMP48:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK20-NEXT: call void @llvm.stackrestore(i8* [[TMP48]]) +// CHECK20-NEXT: [[TMP40:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2 +// CHECK20-NEXT: [[CONV:%.*]] = sext i16 [[TMP40]] to i32 +// CHECK20-NEXT: [[TMP41:%.*]] = load i32, i32* [[B]], align 4 +// CHECK20-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[TMP41]] +// CHECK20-NEXT: [[TMP42:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK20-NEXT: call void @llvm.stackrestore(i8* [[TMP42]]) // CHECK20-NEXT: ret i32 [[ADD3]] // // @@ -16049,7 +15801,7 @@ // CHECK20-NEXT: store i8* null, i8** [[TMP26]], align 4 // CHECK20-NEXT: [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK20-NEXT: [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l224.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** 
null, i8** null, i32 1, i32 0) +// CHECK20-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l224.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.14, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.15, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) // CHECK20-NEXT: [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0 // CHECK20-NEXT: br i1 [[TMP30]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK20: omp_offload.failed: @@ -16117,7 +15869,7 @@ // CHECK20-NEXT: store i8* null, i8** [[TMP19]], align 4 // CHECK20-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK20-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l207.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.15, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) +// CHECK20-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l207.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.17, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.18, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) // CHECK20-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0 // CHECK20-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK20: omp_offload.failed: @@ -16154,11 +15906,11 @@ // CHECK20-NEXT: [[TMP4:%.*]] = load i32, i32* [[B_ADDR]], align 4 // CHECK20-NEXT: store i32 [[TMP4]], i32* [[B_CASTED]], align 4 // CHECK20-NEXT: [[TMP5:%.*]] = load i32, i32* [[B_CASTED]], align 4 -// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], i16* [[TMP3]]) +// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], i16* [[TMP3]]) // CHECK20-NEXT: ret void // // -// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..9 +// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..10 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i32 noundef [[B:%.*]], i32 noundef [[VLA:%.*]], i32 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR3]] { // CHECK20-NEXT: entry: // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -16274,11 +16026,11 @@ // CHECK20-NEXT: [[CONV3:%.*]] = bitcast i32* [[AAA_CASTED]] to i8* // CHECK20-NEXT: store i8 [[TMP5]], i8* [[CONV3]], align 1 // CHECK20-NEXT: [[TMP6:%.*]] = load i32, i32* [[AAA_CASTED]], align 4 -// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, [10 x i32]*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], [10 x i32]* [[TMP0]]) +// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, [10 x i32]*)* @.omp_outlined..13 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], [10 x i32]* [[TMP0]]) // CHECK20-NEXT: ret void // // -// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..13 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], i32 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] { // CHECK20-NEXT: entry: // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -16321,11 +16073,11 @@ // CHECK20-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16* // CHECK20-NEXT: store i16 [[TMP3]], i16* [[CONV1]], align 2 // CHECK20-NEXT: [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4 -// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]]) +// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]]) // CHECK20-NEXT: ret void // // -// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..14 +// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..16 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] { // CHECK20-NEXT: entry: // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 diff --git a/clang/test/OpenMP/target_parallel_for_simd_codegen.cpp b/clang/test/OpenMP/target_parallel_for_simd_codegen.cpp --- a/clang/test/OpenMP/target_parallel_for_simd_codegen.cpp +++ b/clang/test/OpenMP/target_parallel_for_simd_codegen.cpp @@ -333,7 +333,6 @@ // CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS20:%.*]] = alloca [10 x i8*], align 8 // CHECK1-NEXT: [[DOTOFFLOAD_PTRS21:%.*]] = alloca [10 x i8*], align 8 // CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS22:%.*]] = alloca [10 x i8*], align 8 -// CHECK1-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [10 x i64], align 8 // CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]]) // CHECK1-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 // CHECK1-NEXT: store i32 0, i32* [[A]], align 4 @@ -474,106 +473,87 @@ // CHECK1-NEXT: [[TMP70:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0 // CHECK1-NEXT: [[TMP71:%.*]] = bitcast i8** [[TMP70]] to i64* // CHECK1-NEXT: store i64 [[TMP61]], i64* [[TMP71]], align 8 -// CHECK1-NEXT: [[TMP72:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK1-NEXT: store i64 4, i64* [[TMP72]], align 8 -// CHECK1-NEXT: [[TMP73:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 0 -// CHECK1-NEXT: store i8* null, i8** [[TMP73]], align 8 -// CHECK1-NEXT: [[TMP74:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 1 -// CHECK1-NEXT: [[TMP75:%.*]] = bitcast i8** [[TMP74]] to [10 x float]** -// CHECK1-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP75]], align 8 -// CHECK1-NEXT: [[TMP76:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 1 -// CHECK1-NEXT: [[TMP77:%.*]] = bitcast i8** [[TMP76]] to [10 x float]** -// CHECK1-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP77]], align 8 -// CHECK1-NEXT: [[TMP78:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK1-NEXT: store i64 40, i64* [[TMP78]], align 8 -// CHECK1-NEXT: [[TMP79:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 1 -// CHECK1-NEXT: store i8* null, i8** [[TMP79]], align 8 -// CHECK1-NEXT: [[TMP80:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 2 +// CHECK1-NEXT: [[TMP72:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 0 +// CHECK1-NEXT: store i8* null, i8** [[TMP72]], align 8 +// CHECK1-NEXT: [[TMP73:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 1 +// CHECK1-NEXT: [[TMP74:%.*]] = bitcast i8** [[TMP73]] to [10 x float]** +// CHECK1-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP74]], align 8 +// CHECK1-NEXT: [[TMP75:%.*]] = 
getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 1 +// CHECK1-NEXT: [[TMP76:%.*]] = bitcast i8** [[TMP75]] to [10 x float]** +// CHECK1-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP76]], align 8 +// CHECK1-NEXT: [[TMP77:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 1 +// CHECK1-NEXT: store i8* null, i8** [[TMP77]], align 8 +// CHECK1-NEXT: [[TMP78:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 2 +// CHECK1-NEXT: [[TMP79:%.*]] = bitcast i8** [[TMP78]] to i64* +// CHECK1-NEXT: store i64 [[TMP2]], i64* [[TMP79]], align 8 +// CHECK1-NEXT: [[TMP80:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 2 // CHECK1-NEXT: [[TMP81:%.*]] = bitcast i8** [[TMP80]] to i64* // CHECK1-NEXT: store i64 [[TMP2]], i64* [[TMP81]], align 8 -// CHECK1-NEXT: [[TMP82:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 2 -// CHECK1-NEXT: [[TMP83:%.*]] = bitcast i8** [[TMP82]] to i64* -// CHECK1-NEXT: store i64 [[TMP2]], i64* [[TMP83]], align 8 -// CHECK1-NEXT: [[TMP84:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK1-NEXT: store i64 8, i64* [[TMP84]], align 8 -// CHECK1-NEXT: [[TMP85:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 2 -// CHECK1-NEXT: store i8* null, i8** [[TMP85]], align 8 -// CHECK1-NEXT: [[TMP86:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 3 -// CHECK1-NEXT: [[TMP87:%.*]] = bitcast i8** [[TMP86]] to float** -// CHECK1-NEXT: store float* [[VLA]], float** [[TMP87]], align 8 -// CHECK1-NEXT: [[TMP88:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 3 -// CHECK1-NEXT: [[TMP89:%.*]] = bitcast i8** [[TMP88]] to float** -// CHECK1-NEXT: store float* [[VLA]], float** [[TMP89]], align 8 -// CHECK1-NEXT: [[TMP90:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK1-NEXT: store i64 [[TMP65]], i64* [[TMP90]], align 8 -// CHECK1-NEXT: [[TMP91:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 3 -// CHECK1-NEXT: store i8* null, i8** [[TMP91]], align 8 -// CHECK1-NEXT: [[TMP92:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 4 -// CHECK1-NEXT: [[TMP93:%.*]] = bitcast i8** [[TMP92]] to [5 x [10 x double]]** -// CHECK1-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP93]], align 8 -// CHECK1-NEXT: [[TMP94:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 4 -// CHECK1-NEXT: [[TMP95:%.*]] = bitcast i8** [[TMP94]] to [5 x [10 x double]]** -// CHECK1-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP95]], align 8 -// CHECK1-NEXT: [[TMP96:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK1-NEXT: store i64 400, i64* [[TMP96]], align 8 -// CHECK1-NEXT: [[TMP97:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 4 +// CHECK1-NEXT: [[TMP82:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 2 +// CHECK1-NEXT: store i8* null, i8** [[TMP82]], align 8 +// CHECK1-NEXT: [[TMP83:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 3 +// CHECK1-NEXT: [[TMP84:%.*]] = bitcast i8** 
[[TMP83]] to float** +// CHECK1-NEXT: store float* [[VLA]], float** [[TMP84]], align 8 +// CHECK1-NEXT: [[TMP85:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 3 +// CHECK1-NEXT: [[TMP86:%.*]] = bitcast i8** [[TMP85]] to float** +// CHECK1-NEXT: store float* [[VLA]], float** [[TMP86]], align 8 +// CHECK1-NEXT: store i64 [[TMP65]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_sizes.8, i32 0, i32 3), align 8 +// CHECK1-NEXT: [[TMP87:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 3 +// CHECK1-NEXT: store i8* null, i8** [[TMP87]], align 8 +// CHECK1-NEXT: [[TMP88:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 4 +// CHECK1-NEXT: [[TMP89:%.*]] = bitcast i8** [[TMP88]] to [5 x [10 x double]]** +// CHECK1-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP89]], align 8 +// CHECK1-NEXT: [[TMP90:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 4 +// CHECK1-NEXT: [[TMP91:%.*]] = bitcast i8** [[TMP90]] to [5 x [10 x double]]** +// CHECK1-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP91]], align 8 +// CHECK1-NEXT: [[TMP92:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 4 +// CHECK1-NEXT: store i8* null, i8** [[TMP92]], align 8 +// CHECK1-NEXT: [[TMP93:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 5 +// CHECK1-NEXT: [[TMP94:%.*]] = bitcast i8** [[TMP93]] to i64* +// CHECK1-NEXT: store i64 5, i64* [[TMP94]], align 8 +// CHECK1-NEXT: [[TMP95:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 5 +// CHECK1-NEXT: [[TMP96:%.*]] = bitcast i8** [[TMP95]] to i64* +// CHECK1-NEXT: store i64 5, i64* [[TMP96]], align 8 +// CHECK1-NEXT: [[TMP97:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 5 // CHECK1-NEXT: store i8* null, i8** [[TMP97]], align 8 -// CHECK1-NEXT: [[TMP98:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 5 +// CHECK1-NEXT: [[TMP98:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 6 // CHECK1-NEXT: [[TMP99:%.*]] = bitcast i8** [[TMP98]] to i64* -// CHECK1-NEXT: store i64 5, i64* [[TMP99]], align 8 -// CHECK1-NEXT: [[TMP100:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 5 +// CHECK1-NEXT: store i64 [[TMP5]], i64* [[TMP99]], align 8 +// CHECK1-NEXT: [[TMP100:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 6 // CHECK1-NEXT: [[TMP101:%.*]] = bitcast i8** [[TMP100]] to i64* -// CHECK1-NEXT: store i64 5, i64* [[TMP101]], align 8 -// CHECK1-NEXT: [[TMP102:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5 -// CHECK1-NEXT: store i64 8, i64* [[TMP102]], align 8 -// CHECK1-NEXT: [[TMP103:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 5 -// CHECK1-NEXT: store i8* null, i8** [[TMP103]], align 8 -// CHECK1-NEXT: [[TMP104:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 6 -// CHECK1-NEXT: [[TMP105:%.*]] = bitcast i8** [[TMP104]] to i64* -// CHECK1-NEXT: store i64 [[TMP5]], i64* [[TMP105]], align 8 -// CHECK1-NEXT: [[TMP106:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 6 -// CHECK1-NEXT: 
[[TMP107:%.*]] = bitcast i8** [[TMP106]] to i64* -// CHECK1-NEXT: store i64 [[TMP5]], i64* [[TMP107]], align 8 -// CHECK1-NEXT: [[TMP108:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6 -// CHECK1-NEXT: store i64 8, i64* [[TMP108]], align 8 -// CHECK1-NEXT: [[TMP109:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 6 -// CHECK1-NEXT: store i8* null, i8** [[TMP109]], align 8 -// CHECK1-NEXT: [[TMP110:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 7 -// CHECK1-NEXT: [[TMP111:%.*]] = bitcast i8** [[TMP110]] to double** -// CHECK1-NEXT: store double* [[VLA1]], double** [[TMP111]], align 8 -// CHECK1-NEXT: [[TMP112:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 7 -// CHECK1-NEXT: [[TMP113:%.*]] = bitcast i8** [[TMP112]] to double** -// CHECK1-NEXT: store double* [[VLA1]], double** [[TMP113]], align 8 -// CHECK1-NEXT: [[TMP114:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7 -// CHECK1-NEXT: store i64 [[TMP67]], i64* [[TMP114]], align 8 -// CHECK1-NEXT: [[TMP115:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 7 -// CHECK1-NEXT: store i8* null, i8** [[TMP115]], align 8 -// CHECK1-NEXT: [[TMP116:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 8 -// CHECK1-NEXT: [[TMP117:%.*]] = bitcast i8** [[TMP116]] to %struct.TT** -// CHECK1-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP117]], align 8 -// CHECK1-NEXT: [[TMP118:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 8 -// CHECK1-NEXT: [[TMP119:%.*]] = bitcast i8** [[TMP118]] to %struct.TT** -// CHECK1-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP119]], align 8 -// CHECK1-NEXT: [[TMP120:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 8 -// CHECK1-NEXT: store i64 16, i64* [[TMP120]], align 8 -// CHECK1-NEXT: [[TMP121:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 8 -// CHECK1-NEXT: store i8* null, i8** [[TMP121]], align 8 -// CHECK1-NEXT: [[TMP122:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 9 -// CHECK1-NEXT: [[TMP123:%.*]] = bitcast i8** [[TMP122]] to i64* -// CHECK1-NEXT: store i64 [[TMP63]], i64* [[TMP123]], align 8 -// CHECK1-NEXT: [[TMP124:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 9 -// CHECK1-NEXT: [[TMP125:%.*]] = bitcast i8** [[TMP124]] to i64* -// CHECK1-NEXT: store i64 [[TMP63]], i64* [[TMP125]], align 8 -// CHECK1-NEXT: [[TMP126:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 9 -// CHECK1-NEXT: store i64 4, i64* [[TMP126]], align 8 -// CHECK1-NEXT: [[TMP127:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 9 -// CHECK1-NEXT: store i8* null, i8** [[TMP127]], align 8 -// CHECK1-NEXT: [[TMP128:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP129:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP130:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP131:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* 
@.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140.region_id, i32 10, i8** [[TMP128]], i8** [[TMP129]], i64* [[TMP130]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_maptypes.8, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) -// CHECK1-NEXT: [[TMP132:%.*]] = icmp ne i32 [[TMP131]], 0 -// CHECK1-NEXT: br i1 [[TMP132]], label [[OMP_OFFLOAD_FAILED23:%.*]], label [[OMP_OFFLOAD_CONT24:%.*]] +// CHECK1-NEXT: store i64 [[TMP5]], i64* [[TMP101]], align 8 +// CHECK1-NEXT: [[TMP102:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 6 +// CHECK1-NEXT: store i8* null, i8** [[TMP102]], align 8 +// CHECK1-NEXT: [[TMP103:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 7 +// CHECK1-NEXT: [[TMP104:%.*]] = bitcast i8** [[TMP103]] to double** +// CHECK1-NEXT: store double* [[VLA1]], double** [[TMP104]], align 8 +// CHECK1-NEXT: [[TMP105:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 7 +// CHECK1-NEXT: [[TMP106:%.*]] = bitcast i8** [[TMP105]] to double** +// CHECK1-NEXT: store double* [[VLA1]], double** [[TMP106]], align 8 +// CHECK1-NEXT: store i64 [[TMP67]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_sizes.8, i32 0, i32 7), align 8 +// CHECK1-NEXT: [[TMP107:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 7 +// CHECK1-NEXT: store i8* null, i8** [[TMP107]], align 8 +// CHECK1-NEXT: [[TMP108:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 8 +// CHECK1-NEXT: [[TMP109:%.*]] = bitcast i8** [[TMP108]] to %struct.TT** +// CHECK1-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP109]], align 8 +// CHECK1-NEXT: [[TMP110:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 8 +// CHECK1-NEXT: [[TMP111:%.*]] = bitcast i8** [[TMP110]] to %struct.TT** +// CHECK1-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP111]], align 8 +// CHECK1-NEXT: [[TMP112:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 8 +// CHECK1-NEXT: store i8* null, i8** [[TMP112]], align 8 +// CHECK1-NEXT: [[TMP113:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 9 +// CHECK1-NEXT: [[TMP114:%.*]] = bitcast i8** [[TMP113]] to i64* +// CHECK1-NEXT: store i64 [[TMP63]], i64* [[TMP114]], align 8 +// CHECK1-NEXT: [[TMP115:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 9 +// CHECK1-NEXT: [[TMP116:%.*]] = bitcast i8** [[TMP115]] to i64* +// CHECK1-NEXT: store i64 [[TMP63]], i64* [[TMP116]], align 8 +// CHECK1-NEXT: [[TMP117:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 9 +// CHECK1-NEXT: store i8* null, i8** [[TMP117]], align 8 +// CHECK1-NEXT: [[TMP118:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP119:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP120:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140.region_id, i32 10, i8** [[TMP118]], i8** [[TMP119]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) +// 
CHECK1-NEXT: [[TMP121:%.*]] = icmp ne i32 [[TMP120]], 0 +// CHECK1-NEXT: br i1 [[TMP121]], label [[OMP_OFFLOAD_FAILED23:%.*]], label [[OMP_OFFLOAD_CONT24:%.*]] // CHECK1: omp_offload.failed23: // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140(i64 [[TMP61]], [10 x float]* [[B]], i64 [[TMP2]], float* [[VLA]], [5 x [10 x double]]* [[C]], i64 5, i64 [[TMP5]], double* [[VLA1]], %struct.TT* [[D]], i64 [[TMP63]]) #[[ATTR4]] // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT24]] @@ -583,10 +563,10 @@ // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140(i64 [[TMP61]], [10 x float]* [[B]], i64 [[TMP2]], float* [[VLA]], [5 x [10 x double]]* [[C]], i64 5, i64 [[TMP5]], double* [[VLA1]], %struct.TT* [[D]], i64 [[TMP63]]) #[[ATTR4]] // CHECK1-NEXT: br label [[OMP_IF_END26]] // CHECK1: omp_if.end26: -// CHECK1-NEXT: [[TMP133:%.*]] = load i32, i32* [[A]], align 4 -// CHECK1-NEXT: [[TMP134:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK1-NEXT: call void @llvm.stackrestore(i8* [[TMP134]]) -// CHECK1-NEXT: ret i32 [[TMP133]] +// CHECK1-NEXT: [[TMP122:%.*]] = load i32, i32* [[A]], align 4 +// CHECK1-NEXT: [[TMP123:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK1-NEXT: call void @llvm.stackrestore(i8* [[TMP123]]) +// CHECK1-NEXT: ret i32 [[TMP122]] // // // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96 @@ -1314,7 +1294,6 @@ // CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 8 // CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 8 // CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 8 -// CHECK1-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 8 // CHECK1-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 // CHECK1-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8 @@ -1345,56 +1324,46 @@ // CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK1-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to double** // CHECK1-NEXT: store double* [[A]], double** [[TMP13]], align 8 -// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK1-NEXT: store i64 8, i64* [[TMP14]], align 8 -// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK1-NEXT: store i8* null, i8** [[TMP15]], align 8 -// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK1-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i64* -// CHECK1-NEXT: store i64 [[TMP6]], i64* [[TMP17]], align 8 -// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK1-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i64* -// CHECK1-NEXT: store i64 [[TMP6]], i64* [[TMP19]], align 8 -// CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK1-NEXT: store i64 4, i64* [[TMP20]], align 8 -// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK1-NEXT: store i8* null, i8** [[TMP21]], align 8 -// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i8*], [5 x 
i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK1-NEXT: store i8* null, i8** [[TMP14]], align 8 +// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK1-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i64* +// CHECK1-NEXT: store i64 [[TMP6]], i64* [[TMP16]], align 8 +// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK1-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i64* +// CHECK1-NEXT: store i64 [[TMP6]], i64* [[TMP18]], align 8 +// CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK1-NEXT: store i8* null, i8** [[TMP19]], align 8 +// CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK1-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i64* +// CHECK1-NEXT: store i64 2, i64* [[TMP21]], align 8 +// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK1-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i64* // CHECK1-NEXT: store i64 2, i64* [[TMP23]], align 8 -// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK1-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i64* -// CHECK1-NEXT: store i64 2, i64* [[TMP25]], align 8 -// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK1-NEXT: store i64 8, i64* [[TMP26]], align 8 -// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK1-NEXT: store i8* null, i8** [[TMP27]], align 8 -// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK1-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i64* -// CHECK1-NEXT: store i64 [[TMP2]], i64* [[TMP29]], align 8 -// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK1-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i64* -// CHECK1-NEXT: store i64 [[TMP2]], i64* [[TMP31]], align 8 -// CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK1-NEXT: store i64 8, i64* [[TMP32]], align 8 -// CHECK1-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 -// CHECK1-NEXT: store i8* null, i8** [[TMP33]], align 8 -// CHECK1-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK1-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i16** -// CHECK1-NEXT: store i16* [[VLA]], i16** [[TMP35]], align 8 -// CHECK1-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK1-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i16** -// CHECK1-NEXT: store i16* [[VLA]], i16** [[TMP37]], align 8 -// CHECK1-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK1-NEXT: store i64 [[TMP9]], i64* [[TMP38]], align 8 -// CHECK1-NEXT: [[TMP39:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 -// CHECK1-NEXT: store i8* null, i8** [[TMP39]], align 8 -// CHECK1-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// 
CHECK1-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216.region_id, i32 5, i8** [[TMP40]], i8** [[TMP41]], i64* [[TMP42]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) -// CHECK1-NEXT: [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0 -// CHECK1-NEXT: br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK1-NEXT: store i8* null, i8** [[TMP24]], align 8 +// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK1-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i64* +// CHECK1-NEXT: store i64 [[TMP2]], i64* [[TMP26]], align 8 +// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK1-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i64* +// CHECK1-NEXT: store i64 [[TMP2]], i64* [[TMP28]], align 8 +// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 +// CHECK1-NEXT: store i8* null, i8** [[TMP29]], align 8 +// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK1-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i16** +// CHECK1-NEXT: store i16* [[VLA]], i16** [[TMP31]], align 8 +// CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK1-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i16** +// CHECK1-NEXT: store i16* [[VLA]], i16** [[TMP33]], align 8 +// CHECK1-NEXT: store i64 [[TMP9]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.11, i32 0, i32 4), align 8 +// CHECK1-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 +// CHECK1-NEXT: store i8* null, i8** [[TMP34]], align 8 +// CHECK1-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP37:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216.region_id, i32 5, i8** [[TMP35]], i8** [[TMP36]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.11, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) +// CHECK1-NEXT: [[TMP38:%.*]] = icmp ne i32 [[TMP37]], 0 +// CHECK1-NEXT: br i1 [[TMP38]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK1: omp_offload.failed: // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216(%struct.S1* [[THIS1]], i64 [[TMP6]], i64 2, i64 [[TMP2]], i16* [[VLA]]) #[[ATTR4]] // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -1404,15 +1373,15 @@ // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216(%struct.S1* [[THIS1]], i64 [[TMP6]], 
i64 2, i64 [[TMP2]], i16* [[VLA]]) #[[ATTR4]] // CHECK1-NEXT: br label [[OMP_IF_END]] // CHECK1: omp_if.end: -// CHECK1-NEXT: [[TMP45:%.*]] = mul nsw i64 1, [[TMP2]] -// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP45]] +// CHECK1-NEXT: [[TMP39:%.*]] = mul nsw i64 1, [[TMP2]] +// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP39]] // CHECK1-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1 -// CHECK1-NEXT: [[TMP46:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2 -// CHECK1-NEXT: [[CONV3:%.*]] = sext i16 [[TMP46]] to i32 -// CHECK1-NEXT: [[TMP47:%.*]] = load i32, i32* [[B]], align 4 -// CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], [[TMP47]] -// CHECK1-NEXT: [[TMP48:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK1-NEXT: call void @llvm.stackrestore(i8* [[TMP48]]) +// CHECK1-NEXT: [[TMP40:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2 +// CHECK1-NEXT: [[CONV3:%.*]] = sext i16 [[TMP40]] to i32 +// CHECK1-NEXT: [[TMP41:%.*]] = load i32, i32* [[B]], align 4 +// CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], [[TMP41]] +// CHECK1-NEXT: [[TMP42:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK1-NEXT: call void @llvm.stackrestore(i8* [[TMP42]]) // CHECK1-NEXT: ret i32 [[ADD4]] // // @@ -1484,7 +1453,7 @@ // CHECK1-NEXT: store i8* null, i8** [[TMP26]], align 8 // CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) +// CHECK1-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.14, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.15, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) // CHECK1-NEXT: [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0 // CHECK1-NEXT: br i1 [[TMP30]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK1: omp_offload.failed: @@ -1553,7 +1522,7 @@ // CHECK1-NEXT: store i8* null, i8** [[TMP19]], align 8 // CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.15, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) +// CHECK1-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* 
@.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.17, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.18, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) // CHECK1-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0 // CHECK1-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK1: omp_offload.failed: @@ -1592,11 +1561,11 @@ // CHECK1-NEXT: [[CONV3:%.*]] = bitcast i64* [[B_CASTED]] to i32* // CHECK1-NEXT: store i32 [[TMP4]], i32* [[CONV3]], align 4 // CHECK1-NEXT: [[TMP5:%.*]] = load i64, i64* [[B_CASTED]], align 8 -// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], i16* [[TMP3]]) +// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], i16* [[TMP3]]) // CHECK1-NEXT: ret void // // -// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..9 +// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..10 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i64 noundef [[B:%.*]], i64 noundef [[VLA:%.*]], i64 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR3]] { // CHECK1-NEXT: entry: // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -1722,11 +1691,11 @@ // CHECK1-NEXT: [[CONV5:%.*]] = bitcast i64* [[AAA_CASTED]] to i8* // CHECK1-NEXT: store i8 [[TMP5]], i8* [[CONV5]], align 1 // CHECK1-NEXT: [[TMP6:%.*]] = load i64, i64* [[AAA_CASTED]], align 8 -// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, [10 x i32]*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], [10 x i32]* [[TMP0]]) +// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, [10 x i32]*)* @.omp_outlined..13 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], [10 x i32]* [[TMP0]]) // CHECK1-NEXT: ret void // // -// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..13 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], i64 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] { // CHECK1-NEXT: entry: // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -1772,11 +1741,11 @@ // CHECK1-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16* // CHECK1-NEXT: store i16 [[TMP3]], i16* [[CONV3]], align 2 // CHECK1-NEXT: [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8 -// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]]) +// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]]) // CHECK1-NEXT: ret void // // -// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..14 +// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..16 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] { // CHECK1-NEXT: entry: // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -1911,7 +1880,6 @@ // CHECK2-NEXT: [[DOTOFFLOAD_BASEPTRS20:%.*]] = alloca [10 x i8*], align 8 // CHECK2-NEXT: [[DOTOFFLOAD_PTRS21:%.*]] = alloca [10 x i8*], align 8 // CHECK2-NEXT: [[DOTOFFLOAD_MAPPERS22:%.*]] = alloca [10 x i8*], align 8 -// CHECK2-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [10 x i64], align 8 // CHECK2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]]) // CHECK2-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 // CHECK2-NEXT: store i32 0, i32* [[A]], align 4 @@ -2052,106 +2020,87 @@ // CHECK2-NEXT: [[TMP70:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0 // CHECK2-NEXT: [[TMP71:%.*]] = bitcast i8** [[TMP70]] to i64* // CHECK2-NEXT: store i64 [[TMP61]], i64* [[TMP71]], align 8 -// CHECK2-NEXT: [[TMP72:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK2-NEXT: store i64 4, i64* [[TMP72]], align 8 -// CHECK2-NEXT: [[TMP73:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 0 -// CHECK2-NEXT: store i8* null, i8** [[TMP73]], align 8 -// CHECK2-NEXT: [[TMP74:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 1 -// CHECK2-NEXT: [[TMP75:%.*]] = bitcast i8** [[TMP74]] to [10 x float]** -// CHECK2-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP75]], align 8 -// CHECK2-NEXT: [[TMP76:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 1 -// 
CHECK2-NEXT: [[TMP77:%.*]] = bitcast i8** [[TMP76]] to [10 x float]** -// CHECK2-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP77]], align 8 -// CHECK2-NEXT: [[TMP78:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK2-NEXT: store i64 40, i64* [[TMP78]], align 8 -// CHECK2-NEXT: [[TMP79:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 1 -// CHECK2-NEXT: store i8* null, i8** [[TMP79]], align 8 -// CHECK2-NEXT: [[TMP80:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 2 +// CHECK2-NEXT: [[TMP72:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 0 +// CHECK2-NEXT: store i8* null, i8** [[TMP72]], align 8 +// CHECK2-NEXT: [[TMP73:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 1 +// CHECK2-NEXT: [[TMP74:%.*]] = bitcast i8** [[TMP73]] to [10 x float]** +// CHECK2-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP74]], align 8 +// CHECK2-NEXT: [[TMP75:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 1 +// CHECK2-NEXT: [[TMP76:%.*]] = bitcast i8** [[TMP75]] to [10 x float]** +// CHECK2-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP76]], align 8 +// CHECK2-NEXT: [[TMP77:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 1 +// CHECK2-NEXT: store i8* null, i8** [[TMP77]], align 8 +// CHECK2-NEXT: [[TMP78:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 2 +// CHECK2-NEXT: [[TMP79:%.*]] = bitcast i8** [[TMP78]] to i64* +// CHECK2-NEXT: store i64 [[TMP2]], i64* [[TMP79]], align 8 +// CHECK2-NEXT: [[TMP80:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 2 // CHECK2-NEXT: [[TMP81:%.*]] = bitcast i8** [[TMP80]] to i64* // CHECK2-NEXT: store i64 [[TMP2]], i64* [[TMP81]], align 8 -// CHECK2-NEXT: [[TMP82:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 2 -// CHECK2-NEXT: [[TMP83:%.*]] = bitcast i8** [[TMP82]] to i64* -// CHECK2-NEXT: store i64 [[TMP2]], i64* [[TMP83]], align 8 -// CHECK2-NEXT: [[TMP84:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK2-NEXT: store i64 8, i64* [[TMP84]], align 8 -// CHECK2-NEXT: [[TMP85:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 2 -// CHECK2-NEXT: store i8* null, i8** [[TMP85]], align 8 -// CHECK2-NEXT: [[TMP86:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 3 -// CHECK2-NEXT: [[TMP87:%.*]] = bitcast i8** [[TMP86]] to float** -// CHECK2-NEXT: store float* [[VLA]], float** [[TMP87]], align 8 -// CHECK2-NEXT: [[TMP88:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 3 -// CHECK2-NEXT: [[TMP89:%.*]] = bitcast i8** [[TMP88]] to float** -// CHECK2-NEXT: store float* [[VLA]], float** [[TMP89]], align 8 -// CHECK2-NEXT: [[TMP90:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK2-NEXT: store i64 [[TMP65]], i64* [[TMP90]], align 8 -// CHECK2-NEXT: [[TMP91:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 3 -// CHECK2-NEXT: store i8* null, i8** [[TMP91]], align 8 -// CHECK2-NEXT: [[TMP92:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 4 -// 
CHECK2-NEXT: [[TMP93:%.*]] = bitcast i8** [[TMP92]] to [5 x [10 x double]]** -// CHECK2-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP93]], align 8 -// CHECK2-NEXT: [[TMP94:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 4 -// CHECK2-NEXT: [[TMP95:%.*]] = bitcast i8** [[TMP94]] to [5 x [10 x double]]** -// CHECK2-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP95]], align 8 -// CHECK2-NEXT: [[TMP96:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK2-NEXT: store i64 400, i64* [[TMP96]], align 8 -// CHECK2-NEXT: [[TMP97:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 4 +// CHECK2-NEXT: [[TMP82:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 2 +// CHECK2-NEXT: store i8* null, i8** [[TMP82]], align 8 +// CHECK2-NEXT: [[TMP83:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 3 +// CHECK2-NEXT: [[TMP84:%.*]] = bitcast i8** [[TMP83]] to float** +// CHECK2-NEXT: store float* [[VLA]], float** [[TMP84]], align 8 +// CHECK2-NEXT: [[TMP85:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 3 +// CHECK2-NEXT: [[TMP86:%.*]] = bitcast i8** [[TMP85]] to float** +// CHECK2-NEXT: store float* [[VLA]], float** [[TMP86]], align 8 +// CHECK2-NEXT: store i64 [[TMP65]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_sizes.8, i32 0, i32 3), align 8 +// CHECK2-NEXT: [[TMP87:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 3 +// CHECK2-NEXT: store i8* null, i8** [[TMP87]], align 8 +// CHECK2-NEXT: [[TMP88:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 4 +// CHECK2-NEXT: [[TMP89:%.*]] = bitcast i8** [[TMP88]] to [5 x [10 x double]]** +// CHECK2-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP89]], align 8 +// CHECK2-NEXT: [[TMP90:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 4 +// CHECK2-NEXT: [[TMP91:%.*]] = bitcast i8** [[TMP90]] to [5 x [10 x double]]** +// CHECK2-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP91]], align 8 +// CHECK2-NEXT: [[TMP92:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 4 +// CHECK2-NEXT: store i8* null, i8** [[TMP92]], align 8 +// CHECK2-NEXT: [[TMP93:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 5 +// CHECK2-NEXT: [[TMP94:%.*]] = bitcast i8** [[TMP93]] to i64* +// CHECK2-NEXT: store i64 5, i64* [[TMP94]], align 8 +// CHECK2-NEXT: [[TMP95:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 5 +// CHECK2-NEXT: [[TMP96:%.*]] = bitcast i8** [[TMP95]] to i64* +// CHECK2-NEXT: store i64 5, i64* [[TMP96]], align 8 +// CHECK2-NEXT: [[TMP97:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 5 // CHECK2-NEXT: store i8* null, i8** [[TMP97]], align 8 -// CHECK2-NEXT: [[TMP98:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 5 +// CHECK2-NEXT: [[TMP98:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 6 // CHECK2-NEXT: [[TMP99:%.*]] = bitcast i8** [[TMP98]] to i64* -// CHECK2-NEXT: store i64 5, i64* [[TMP99]], align 8 -// CHECK2-NEXT: [[TMP100:%.*]] = getelementptr inbounds 
[10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 5 +// CHECK2-NEXT: store i64 [[TMP5]], i64* [[TMP99]], align 8 +// CHECK2-NEXT: [[TMP100:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 6 // CHECK2-NEXT: [[TMP101:%.*]] = bitcast i8** [[TMP100]] to i64* -// CHECK2-NEXT: store i64 5, i64* [[TMP101]], align 8 -// CHECK2-NEXT: [[TMP102:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5 -// CHECK2-NEXT: store i64 8, i64* [[TMP102]], align 8 -// CHECK2-NEXT: [[TMP103:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 5 -// CHECK2-NEXT: store i8* null, i8** [[TMP103]], align 8 -// CHECK2-NEXT: [[TMP104:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 6 -// CHECK2-NEXT: [[TMP105:%.*]] = bitcast i8** [[TMP104]] to i64* -// CHECK2-NEXT: store i64 [[TMP5]], i64* [[TMP105]], align 8 -// CHECK2-NEXT: [[TMP106:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 6 -// CHECK2-NEXT: [[TMP107:%.*]] = bitcast i8** [[TMP106]] to i64* -// CHECK2-NEXT: store i64 [[TMP5]], i64* [[TMP107]], align 8 -// CHECK2-NEXT: [[TMP108:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6 -// CHECK2-NEXT: store i64 8, i64* [[TMP108]], align 8 -// CHECK2-NEXT: [[TMP109:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 6 -// CHECK2-NEXT: store i8* null, i8** [[TMP109]], align 8 -// CHECK2-NEXT: [[TMP110:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 7 -// CHECK2-NEXT: [[TMP111:%.*]] = bitcast i8** [[TMP110]] to double** -// CHECK2-NEXT: store double* [[VLA1]], double** [[TMP111]], align 8 -// CHECK2-NEXT: [[TMP112:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 7 -// CHECK2-NEXT: [[TMP113:%.*]] = bitcast i8** [[TMP112]] to double** -// CHECK2-NEXT: store double* [[VLA1]], double** [[TMP113]], align 8 -// CHECK2-NEXT: [[TMP114:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7 -// CHECK2-NEXT: store i64 [[TMP67]], i64* [[TMP114]], align 8 -// CHECK2-NEXT: [[TMP115:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 7 -// CHECK2-NEXT: store i8* null, i8** [[TMP115]], align 8 -// CHECK2-NEXT: [[TMP116:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 8 -// CHECK2-NEXT: [[TMP117:%.*]] = bitcast i8** [[TMP116]] to %struct.TT** -// CHECK2-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP117]], align 8 -// CHECK2-NEXT: [[TMP118:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 8 -// CHECK2-NEXT: [[TMP119:%.*]] = bitcast i8** [[TMP118]] to %struct.TT** -// CHECK2-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP119]], align 8 -// CHECK2-NEXT: [[TMP120:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 8 -// CHECK2-NEXT: store i64 16, i64* [[TMP120]], align 8 -// CHECK2-NEXT: [[TMP121:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 8 -// CHECK2-NEXT: store i8* null, i8** [[TMP121]], align 8 -// CHECK2-NEXT: [[TMP122:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 9 -// CHECK2-NEXT: [[TMP123:%.*]] = bitcast i8** [[TMP122]] to i64* -// CHECK2-NEXT: store i64 [[TMP63]], i64* [[TMP123]], 
align 8 -// CHECK2-NEXT: [[TMP124:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 9 -// CHECK2-NEXT: [[TMP125:%.*]] = bitcast i8** [[TMP124]] to i64* -// CHECK2-NEXT: store i64 [[TMP63]], i64* [[TMP125]], align 8 -// CHECK2-NEXT: [[TMP126:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 9 -// CHECK2-NEXT: store i64 4, i64* [[TMP126]], align 8 -// CHECK2-NEXT: [[TMP127:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 9 -// CHECK2-NEXT: store i8* null, i8** [[TMP127]], align 8 -// CHECK2-NEXT: [[TMP128:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0 -// CHECK2-NEXT: [[TMP129:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0 -// CHECK2-NEXT: [[TMP130:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK2-NEXT: [[TMP131:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140.region_id, i32 10, i8** [[TMP128]], i8** [[TMP129]], i64* [[TMP130]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_maptypes.8, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) -// CHECK2-NEXT: [[TMP132:%.*]] = icmp ne i32 [[TMP131]], 0 -// CHECK2-NEXT: br i1 [[TMP132]], label [[OMP_OFFLOAD_FAILED23:%.*]], label [[OMP_OFFLOAD_CONT24:%.*]] +// CHECK2-NEXT: store i64 [[TMP5]], i64* [[TMP101]], align 8 +// CHECK2-NEXT: [[TMP102:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 6 +// CHECK2-NEXT: store i8* null, i8** [[TMP102]], align 8 +// CHECK2-NEXT: [[TMP103:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 7 +// CHECK2-NEXT: [[TMP104:%.*]] = bitcast i8** [[TMP103]] to double** +// CHECK2-NEXT: store double* [[VLA1]], double** [[TMP104]], align 8 +// CHECK2-NEXT: [[TMP105:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 7 +// CHECK2-NEXT: [[TMP106:%.*]] = bitcast i8** [[TMP105]] to double** +// CHECK2-NEXT: store double* [[VLA1]], double** [[TMP106]], align 8 +// CHECK2-NEXT: store i64 [[TMP67]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_sizes.8, i32 0, i32 7), align 8 +// CHECK2-NEXT: [[TMP107:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 7 +// CHECK2-NEXT: store i8* null, i8** [[TMP107]], align 8 +// CHECK2-NEXT: [[TMP108:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 8 +// CHECK2-NEXT: [[TMP109:%.*]] = bitcast i8** [[TMP108]] to %struct.TT** +// CHECK2-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP109]], align 8 +// CHECK2-NEXT: [[TMP110:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 8 +// CHECK2-NEXT: [[TMP111:%.*]] = bitcast i8** [[TMP110]] to %struct.TT** +// CHECK2-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP111]], align 8 +// CHECK2-NEXT: [[TMP112:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 8 +// CHECK2-NEXT: store i8* null, i8** [[TMP112]], align 8 +// CHECK2-NEXT: [[TMP113:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 9 +// CHECK2-NEXT: [[TMP114:%.*]] = bitcast i8** [[TMP113]] to i64* +// CHECK2-NEXT: store i64 [[TMP63]], i64* [[TMP114]], align 8 +// CHECK2-NEXT: [[TMP115:%.*]] = 
getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 9 +// CHECK2-NEXT: [[TMP116:%.*]] = bitcast i8** [[TMP115]] to i64* +// CHECK2-NEXT: store i64 [[TMP63]], i64* [[TMP116]], align 8 +// CHECK2-NEXT: [[TMP117:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 9 +// CHECK2-NEXT: store i8* null, i8** [[TMP117]], align 8 +// CHECK2-NEXT: [[TMP118:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0 +// CHECK2-NEXT: [[TMP119:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0 +// CHECK2-NEXT: [[TMP120:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140.region_id, i32 10, i8** [[TMP118]], i8** [[TMP119]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) +// CHECK2-NEXT: [[TMP121:%.*]] = icmp ne i32 [[TMP120]], 0 +// CHECK2-NEXT: br i1 [[TMP121]], label [[OMP_OFFLOAD_FAILED23:%.*]], label [[OMP_OFFLOAD_CONT24:%.*]] // CHECK2: omp_offload.failed23: // CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140(i64 [[TMP61]], [10 x float]* [[B]], i64 [[TMP2]], float* [[VLA]], [5 x [10 x double]]* [[C]], i64 5, i64 [[TMP5]], double* [[VLA1]], %struct.TT* [[D]], i64 [[TMP63]]) #[[ATTR4]] // CHECK2-NEXT: br label [[OMP_OFFLOAD_CONT24]] @@ -2161,10 +2110,10 @@ // CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140(i64 [[TMP61]], [10 x float]* [[B]], i64 [[TMP2]], float* [[VLA]], [5 x [10 x double]]* [[C]], i64 5, i64 [[TMP5]], double* [[VLA1]], %struct.TT* [[D]], i64 [[TMP63]]) #[[ATTR4]] // CHECK2-NEXT: br label [[OMP_IF_END26]] // CHECK2: omp_if.end26: -// CHECK2-NEXT: [[TMP133:%.*]] = load i32, i32* [[A]], align 4 -// CHECK2-NEXT: [[TMP134:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK2-NEXT: call void @llvm.stackrestore(i8* [[TMP134]]) -// CHECK2-NEXT: ret i32 [[TMP133]] +// CHECK2-NEXT: [[TMP122:%.*]] = load i32, i32* [[A]], align 4 +// CHECK2-NEXT: [[TMP123:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK2-NEXT: call void @llvm.stackrestore(i8* [[TMP123]]) +// CHECK2-NEXT: ret i32 [[TMP122]] // // // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96 @@ -2892,7 +2841,6 @@ // CHECK2-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 8 // CHECK2-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 8 // CHECK2-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 8 -// CHECK2-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 8 // CHECK2-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 // CHECK2-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 // CHECK2-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8 @@ -2923,56 +2871,46 @@ // CHECK2-NEXT: [[TMP12:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK2-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to double** // CHECK2-NEXT: store double* [[A]], double** [[TMP13]], align 8 -// CHECK2-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK2-NEXT: store i64 8, i64* [[TMP14]], align 8 -// CHECK2-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 
0 -// CHECK2-NEXT: store i8* null, i8** [[TMP15]], align 8 -// CHECK2-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK2-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i64* -// CHECK2-NEXT: store i64 [[TMP6]], i64* [[TMP17]], align 8 -// CHECK2-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK2-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i64* -// CHECK2-NEXT: store i64 [[TMP6]], i64* [[TMP19]], align 8 -// CHECK2-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK2-NEXT: store i64 4, i64* [[TMP20]], align 8 -// CHECK2-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK2-NEXT: store i8* null, i8** [[TMP21]], align 8 -// CHECK2-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK2-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK2-NEXT: store i8* null, i8** [[TMP14]], align 8 +// CHECK2-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK2-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i64* +// CHECK2-NEXT: store i64 [[TMP6]], i64* [[TMP16]], align 8 +// CHECK2-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK2-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i64* +// CHECK2-NEXT: store i64 [[TMP6]], i64* [[TMP18]], align 8 +// CHECK2-NEXT: [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK2-NEXT: store i8* null, i8** [[TMP19]], align 8 +// CHECK2-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK2-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i64* +// CHECK2-NEXT: store i64 2, i64* [[TMP21]], align 8 +// CHECK2-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK2-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i64* // CHECK2-NEXT: store i64 2, i64* [[TMP23]], align 8 -// CHECK2-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK2-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i64* -// CHECK2-NEXT: store i64 2, i64* [[TMP25]], align 8 -// CHECK2-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK2-NEXT: store i64 8, i64* [[TMP26]], align 8 -// CHECK2-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK2-NEXT: store i8* null, i8** [[TMP27]], align 8 -// CHECK2-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK2-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i64* -// CHECK2-NEXT: store i64 [[TMP2]], i64* [[TMP29]], align 8 -// CHECK2-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK2-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i64* -// CHECK2-NEXT: store i64 [[TMP2]], i64* [[TMP31]], align 8 -// CHECK2-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK2-NEXT: store i64 8, i64* [[TMP32]], align 8 -// CHECK2-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x 
i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 -// CHECK2-NEXT: store i8* null, i8** [[TMP33]], align 8 -// CHECK2-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK2-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i16** -// CHECK2-NEXT: store i16* [[VLA]], i16** [[TMP35]], align 8 -// CHECK2-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK2-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i16** -// CHECK2-NEXT: store i16* [[VLA]], i16** [[TMP37]], align 8 -// CHECK2-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK2-NEXT: store i64 [[TMP9]], i64* [[TMP38]], align 8 -// CHECK2-NEXT: [[TMP39:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 -// CHECK2-NEXT: store i8* null, i8** [[TMP39]], align 8 -// CHECK2-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK2-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK2-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK2-NEXT: [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216.region_id, i32 5, i8** [[TMP40]], i8** [[TMP41]], i64* [[TMP42]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) -// CHECK2-NEXT: [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0 -// CHECK2-NEXT: br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK2-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK2-NEXT: store i8* null, i8** [[TMP24]], align 8 +// CHECK2-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK2-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i64* +// CHECK2-NEXT: store i64 [[TMP2]], i64* [[TMP26]], align 8 +// CHECK2-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK2-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i64* +// CHECK2-NEXT: store i64 [[TMP2]], i64* [[TMP28]], align 8 +// CHECK2-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 +// CHECK2-NEXT: store i8* null, i8** [[TMP29]], align 8 +// CHECK2-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK2-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i16** +// CHECK2-NEXT: store i16* [[VLA]], i16** [[TMP31]], align 8 +// CHECK2-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK2-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i16** +// CHECK2-NEXT: store i16* [[VLA]], i16** [[TMP33]], align 8 +// CHECK2-NEXT: store i64 [[TMP9]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.11, i32 0, i32 4), align 8 +// CHECK2-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 +// CHECK2-NEXT: store i8* null, i8** [[TMP34]], align 8 +// CHECK2-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK2-NEXT: [[TMP36:%.*]] = 
getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK2-NEXT: [[TMP37:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216.region_id, i32 5, i8** [[TMP35]], i8** [[TMP36]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.11, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) +// CHECK2-NEXT: [[TMP38:%.*]] = icmp ne i32 [[TMP37]], 0 +// CHECK2-NEXT: br i1 [[TMP38]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK2: omp_offload.failed: // CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216(%struct.S1* [[THIS1]], i64 [[TMP6]], i64 2, i64 [[TMP2]], i16* [[VLA]]) #[[ATTR4]] // CHECK2-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -2982,15 +2920,15 @@ // CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216(%struct.S1* [[THIS1]], i64 [[TMP6]], i64 2, i64 [[TMP2]], i16* [[VLA]]) #[[ATTR4]] // CHECK2-NEXT: br label [[OMP_IF_END]] // CHECK2: omp_if.end: -// CHECK2-NEXT: [[TMP45:%.*]] = mul nsw i64 1, [[TMP2]] -// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP45]] +// CHECK2-NEXT: [[TMP39:%.*]] = mul nsw i64 1, [[TMP2]] +// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP39]] // CHECK2-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1 -// CHECK2-NEXT: [[TMP46:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2 -// CHECK2-NEXT: [[CONV3:%.*]] = sext i16 [[TMP46]] to i32 -// CHECK2-NEXT: [[TMP47:%.*]] = load i32, i32* [[B]], align 4 -// CHECK2-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], [[TMP47]] -// CHECK2-NEXT: [[TMP48:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK2-NEXT: call void @llvm.stackrestore(i8* [[TMP48]]) +// CHECK2-NEXT: [[TMP40:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2 +// CHECK2-NEXT: [[CONV3:%.*]] = sext i16 [[TMP40]] to i32 +// CHECK2-NEXT: [[TMP41:%.*]] = load i32, i32* [[B]], align 4 +// CHECK2-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], [[TMP41]] +// CHECK2-NEXT: [[TMP42:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK2-NEXT: call void @llvm.stackrestore(i8* [[TMP42]]) // CHECK2-NEXT: ret i32 [[ADD4]] // // @@ -3062,7 +3000,7 @@ // CHECK2-NEXT: store i8* null, i8** [[TMP26]], align 8 // CHECK2-NEXT: [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK2-NEXT: [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK2-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) +// CHECK2-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.14, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.15, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) // 
CHECK2-NEXT: [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0 // CHECK2-NEXT: br i1 [[TMP30]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK2: omp_offload.failed: @@ -3131,7 +3069,7 @@ // CHECK2-NEXT: store i8* null, i8** [[TMP19]], align 8 // CHECK2-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK2-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK2-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.15, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) +// CHECK2-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.17, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.18, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) // CHECK2-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0 // CHECK2-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK2: omp_offload.failed: @@ -3170,11 +3108,11 @@ // CHECK2-NEXT: [[CONV3:%.*]] = bitcast i64* [[B_CASTED]] to i32* // CHECK2-NEXT: store i32 [[TMP4]], i32* [[CONV3]], align 4 // CHECK2-NEXT: [[TMP5:%.*]] = load i64, i64* [[B_CASTED]], align 8 -// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], i16* [[TMP3]]) +// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], i16* [[TMP3]]) // CHECK2-NEXT: ret void // // -// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..9 +// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..10 // CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i64 noundef [[B:%.*]], i64 noundef [[VLA:%.*]], i64 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR3]] { // CHECK2-NEXT: entry: // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -3300,11 +3238,11 @@ // CHECK2-NEXT: [[CONV5:%.*]] = bitcast i64* [[AAA_CASTED]] to i8* // CHECK2-NEXT: store i8 [[TMP5]], i8* [[CONV5]], align 1 // CHECK2-NEXT: [[TMP6:%.*]] = load i64, i64* [[AAA_CASTED]], align 8 -// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, [10 x i32]*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], [10 x i32]* [[TMP0]]) +// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, [10 x i32]*)* @.omp_outlined..13 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], [10 x i32]* [[TMP0]]) // CHECK2-NEXT: ret void // // -// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..13 // CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], i64 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] { // CHECK2-NEXT: entry: // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -3350,11 +3288,11 @@ // CHECK2-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16* // CHECK2-NEXT: store i16 [[TMP3]], i16* [[CONV3]], align 2 // CHECK2-NEXT: [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8 -// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]]) +// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]]) // CHECK2-NEXT: ret void // // -// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..14 +// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..16 // CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] { // CHECK2-NEXT: entry: // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -3488,7 +3426,6 @@ // CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS14:%.*]] = alloca [10 x i8*], align 4 // CHECK3-NEXT: [[DOTOFFLOAD_PTRS15:%.*]] = alloca [10 x i8*], align 4 // CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS16:%.*]] = alloca [10 x i8*], align 4 -// CHECK3-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [10 x i64], align 4 // CHECK3-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]]) // CHECK3-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 // CHECK3-NEXT: store i32 0, i32* [[A]], align 4 @@ -3620,106 +3557,87 @@ // CHECK3-NEXT: [[TMP68:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 0 // CHECK3-NEXT: [[TMP69:%.*]] = bitcast i8** [[TMP68]] to i32* // CHECK3-NEXT: store i32 [[TMP57]], i32* [[TMP69]], align 4 -// CHECK3-NEXT: [[TMP70:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK3-NEXT: store i64 4, i64* [[TMP70]], align 4 -// CHECK3-NEXT: [[TMP71:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 0 -// CHECK3-NEXT: store i8* null, i8** [[TMP71]], align 4 -// CHECK3-NEXT: [[TMP72:%.*]] = getelementptr inbounds [10 x i8*], 
[10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 1 -// CHECK3-NEXT: [[TMP73:%.*]] = bitcast i8** [[TMP72]] to [10 x float]** -// CHECK3-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP73]], align 4 -// CHECK3-NEXT: [[TMP74:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 1 -// CHECK3-NEXT: [[TMP75:%.*]] = bitcast i8** [[TMP74]] to [10 x float]** -// CHECK3-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP75]], align 4 -// CHECK3-NEXT: [[TMP76:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK3-NEXT: store i64 40, i64* [[TMP76]], align 4 -// CHECK3-NEXT: [[TMP77:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 1 -// CHECK3-NEXT: store i8* null, i8** [[TMP77]], align 4 -// CHECK3-NEXT: [[TMP78:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 2 +// CHECK3-NEXT: [[TMP70:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 0 +// CHECK3-NEXT: store i8* null, i8** [[TMP70]], align 4 +// CHECK3-NEXT: [[TMP71:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 1 +// CHECK3-NEXT: [[TMP72:%.*]] = bitcast i8** [[TMP71]] to [10 x float]** +// CHECK3-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP72]], align 4 +// CHECK3-NEXT: [[TMP73:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 1 +// CHECK3-NEXT: [[TMP74:%.*]] = bitcast i8** [[TMP73]] to [10 x float]** +// CHECK3-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP74]], align 4 +// CHECK3-NEXT: [[TMP75:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 1 +// CHECK3-NEXT: store i8* null, i8** [[TMP75]], align 4 +// CHECK3-NEXT: [[TMP76:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 2 +// CHECK3-NEXT: [[TMP77:%.*]] = bitcast i8** [[TMP76]] to i32* +// CHECK3-NEXT: store i32 [[TMP1]], i32* [[TMP77]], align 4 +// CHECK3-NEXT: [[TMP78:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 2 // CHECK3-NEXT: [[TMP79:%.*]] = bitcast i8** [[TMP78]] to i32* // CHECK3-NEXT: store i32 [[TMP1]], i32* [[TMP79]], align 4 -// CHECK3-NEXT: [[TMP80:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 2 -// CHECK3-NEXT: [[TMP81:%.*]] = bitcast i8** [[TMP80]] to i32* -// CHECK3-NEXT: store i32 [[TMP1]], i32* [[TMP81]], align 4 -// CHECK3-NEXT: [[TMP82:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK3-NEXT: store i64 4, i64* [[TMP82]], align 4 -// CHECK3-NEXT: [[TMP83:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 2 -// CHECK3-NEXT: store i8* null, i8** [[TMP83]], align 4 -// CHECK3-NEXT: [[TMP84:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 3 -// CHECK3-NEXT: [[TMP85:%.*]] = bitcast i8** [[TMP84]] to float** -// CHECK3-NEXT: store float* [[VLA]], float** [[TMP85]], align 4 -// CHECK3-NEXT: [[TMP86:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 3 -// CHECK3-NEXT: [[TMP87:%.*]] = bitcast i8** [[TMP86]] to float** -// CHECK3-NEXT: store float* [[VLA]], float** [[TMP87]], align 4 -// CHECK3-NEXT: [[TMP88:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK3-NEXT: store i64 [[TMP62]], 
i64* [[TMP88]], align 4 -// CHECK3-NEXT: [[TMP89:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 3 -// CHECK3-NEXT: store i8* null, i8** [[TMP89]], align 4 -// CHECK3-NEXT: [[TMP90:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 4 -// CHECK3-NEXT: [[TMP91:%.*]] = bitcast i8** [[TMP90]] to [5 x [10 x double]]** -// CHECK3-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP91]], align 4 -// CHECK3-NEXT: [[TMP92:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 4 -// CHECK3-NEXT: [[TMP93:%.*]] = bitcast i8** [[TMP92]] to [5 x [10 x double]]** -// CHECK3-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP93]], align 4 -// CHECK3-NEXT: [[TMP94:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK3-NEXT: store i64 400, i64* [[TMP94]], align 4 -// CHECK3-NEXT: [[TMP95:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 4 +// CHECK3-NEXT: [[TMP80:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 2 +// CHECK3-NEXT: store i8* null, i8** [[TMP80]], align 4 +// CHECK3-NEXT: [[TMP81:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 3 +// CHECK3-NEXT: [[TMP82:%.*]] = bitcast i8** [[TMP81]] to float** +// CHECK3-NEXT: store float* [[VLA]], float** [[TMP82]], align 4 +// CHECK3-NEXT: [[TMP83:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 3 +// CHECK3-NEXT: [[TMP84:%.*]] = bitcast i8** [[TMP83]] to float** +// CHECK3-NEXT: store float* [[VLA]], float** [[TMP84]], align 4 +// CHECK3-NEXT: store i64 [[TMP62]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_sizes.8, i32 0, i32 3), align 4 +// CHECK3-NEXT: [[TMP85:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 3 +// CHECK3-NEXT: store i8* null, i8** [[TMP85]], align 4 +// CHECK3-NEXT: [[TMP86:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 4 +// CHECK3-NEXT: [[TMP87:%.*]] = bitcast i8** [[TMP86]] to [5 x [10 x double]]** +// CHECK3-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP87]], align 4 +// CHECK3-NEXT: [[TMP88:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 4 +// CHECK3-NEXT: [[TMP89:%.*]] = bitcast i8** [[TMP88]] to [5 x [10 x double]]** +// CHECK3-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP89]], align 4 +// CHECK3-NEXT: [[TMP90:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 4 +// CHECK3-NEXT: store i8* null, i8** [[TMP90]], align 4 +// CHECK3-NEXT: [[TMP91:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 5 +// CHECK3-NEXT: [[TMP92:%.*]] = bitcast i8** [[TMP91]] to i32* +// CHECK3-NEXT: store i32 5, i32* [[TMP92]], align 4 +// CHECK3-NEXT: [[TMP93:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 5 +// CHECK3-NEXT: [[TMP94:%.*]] = bitcast i8** [[TMP93]] to i32* +// CHECK3-NEXT: store i32 5, i32* [[TMP94]], align 4 +// CHECK3-NEXT: [[TMP95:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 5 // CHECK3-NEXT: store i8* null, i8** [[TMP95]], align 4 -// CHECK3-NEXT: [[TMP96:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* 
[[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 5 +// CHECK3-NEXT: [[TMP96:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 6 // CHECK3-NEXT: [[TMP97:%.*]] = bitcast i8** [[TMP96]] to i32* -// CHECK3-NEXT: store i32 5, i32* [[TMP97]], align 4 -// CHECK3-NEXT: [[TMP98:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 5 +// CHECK3-NEXT: store i32 [[TMP3]], i32* [[TMP97]], align 4 +// CHECK3-NEXT: [[TMP98:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 6 // CHECK3-NEXT: [[TMP99:%.*]] = bitcast i8** [[TMP98]] to i32* -// CHECK3-NEXT: store i32 5, i32* [[TMP99]], align 4 -// CHECK3-NEXT: [[TMP100:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5 -// CHECK3-NEXT: store i64 4, i64* [[TMP100]], align 4 -// CHECK3-NEXT: [[TMP101:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 5 -// CHECK3-NEXT: store i8* null, i8** [[TMP101]], align 4 -// CHECK3-NEXT: [[TMP102:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 6 -// CHECK3-NEXT: [[TMP103:%.*]] = bitcast i8** [[TMP102]] to i32* -// CHECK3-NEXT: store i32 [[TMP3]], i32* [[TMP103]], align 4 -// CHECK3-NEXT: [[TMP104:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 6 -// CHECK3-NEXT: [[TMP105:%.*]] = bitcast i8** [[TMP104]] to i32* -// CHECK3-NEXT: store i32 [[TMP3]], i32* [[TMP105]], align 4 -// CHECK3-NEXT: [[TMP106:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6 -// CHECK3-NEXT: store i64 4, i64* [[TMP106]], align 4 -// CHECK3-NEXT: [[TMP107:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 6 -// CHECK3-NEXT: store i8* null, i8** [[TMP107]], align 4 -// CHECK3-NEXT: [[TMP108:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 7 -// CHECK3-NEXT: [[TMP109:%.*]] = bitcast i8** [[TMP108]] to double** -// CHECK3-NEXT: store double* [[VLA1]], double** [[TMP109]], align 4 -// CHECK3-NEXT: [[TMP110:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 7 -// CHECK3-NEXT: [[TMP111:%.*]] = bitcast i8** [[TMP110]] to double** -// CHECK3-NEXT: store double* [[VLA1]], double** [[TMP111]], align 4 -// CHECK3-NEXT: [[TMP112:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7 -// CHECK3-NEXT: store i64 [[TMP65]], i64* [[TMP112]], align 4 -// CHECK3-NEXT: [[TMP113:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 7 -// CHECK3-NEXT: store i8* null, i8** [[TMP113]], align 4 -// CHECK3-NEXT: [[TMP114:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 8 -// CHECK3-NEXT: [[TMP115:%.*]] = bitcast i8** [[TMP114]] to %struct.TT** -// CHECK3-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP115]], align 4 -// CHECK3-NEXT: [[TMP116:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 8 -// CHECK3-NEXT: [[TMP117:%.*]] = bitcast i8** [[TMP116]] to %struct.TT** -// CHECK3-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP117]], align 4 -// CHECK3-NEXT: [[TMP118:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 8 -// CHECK3-NEXT: store i64 12, i64* [[TMP118]], align 4 -// CHECK3-NEXT: [[TMP119:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* 
[[DOTOFFLOAD_MAPPERS16]], i32 0, i32 8 -// CHECK3-NEXT: store i8* null, i8** [[TMP119]], align 4 -// CHECK3-NEXT: [[TMP120:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 9 -// CHECK3-NEXT: [[TMP121:%.*]] = bitcast i8** [[TMP120]] to i32* -// CHECK3-NEXT: store i32 [[TMP59]], i32* [[TMP121]], align 4 -// CHECK3-NEXT: [[TMP122:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 9 -// CHECK3-NEXT: [[TMP123:%.*]] = bitcast i8** [[TMP122]] to i32* -// CHECK3-NEXT: store i32 [[TMP59]], i32* [[TMP123]], align 4 -// CHECK3-NEXT: [[TMP124:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 9 -// CHECK3-NEXT: store i64 4, i64* [[TMP124]], align 4 -// CHECK3-NEXT: [[TMP125:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 9 -// CHECK3-NEXT: store i8* null, i8** [[TMP125]], align 4 -// CHECK3-NEXT: [[TMP126:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP127:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP128:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP129:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140.region_id, i32 10, i8** [[TMP126]], i8** [[TMP127]], i64* [[TMP128]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_maptypes.8, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) -// CHECK3-NEXT: [[TMP130:%.*]] = icmp ne i32 [[TMP129]], 0 -// CHECK3-NEXT: br i1 [[TMP130]], label [[OMP_OFFLOAD_FAILED17:%.*]], label [[OMP_OFFLOAD_CONT18:%.*]] +// CHECK3-NEXT: store i32 [[TMP3]], i32* [[TMP99]], align 4 +// CHECK3-NEXT: [[TMP100:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 6 +// CHECK3-NEXT: store i8* null, i8** [[TMP100]], align 4 +// CHECK3-NEXT: [[TMP101:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 7 +// CHECK3-NEXT: [[TMP102:%.*]] = bitcast i8** [[TMP101]] to double** +// CHECK3-NEXT: store double* [[VLA1]], double** [[TMP102]], align 4 +// CHECK3-NEXT: [[TMP103:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 7 +// CHECK3-NEXT: [[TMP104:%.*]] = bitcast i8** [[TMP103]] to double** +// CHECK3-NEXT: store double* [[VLA1]], double** [[TMP104]], align 4 +// CHECK3-NEXT: store i64 [[TMP65]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_sizes.8, i32 0, i32 7), align 4 +// CHECK3-NEXT: [[TMP105:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 7 +// CHECK3-NEXT: store i8* null, i8** [[TMP105]], align 4 +// CHECK3-NEXT: [[TMP106:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 8 +// CHECK3-NEXT: [[TMP107:%.*]] = bitcast i8** [[TMP106]] to %struct.TT** +// CHECK3-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP107]], align 4 +// CHECK3-NEXT: [[TMP108:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 8 +// CHECK3-NEXT: [[TMP109:%.*]] = bitcast i8** [[TMP108]] to %struct.TT** +// CHECK3-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP109]], align 4 +// CHECK3-NEXT: [[TMP110:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 8 +// 
CHECK3-NEXT: store i8* null, i8** [[TMP110]], align 4 +// CHECK3-NEXT: [[TMP111:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 9 +// CHECK3-NEXT: [[TMP112:%.*]] = bitcast i8** [[TMP111]] to i32* +// CHECK3-NEXT: store i32 [[TMP59]], i32* [[TMP112]], align 4 +// CHECK3-NEXT: [[TMP113:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 9 +// CHECK3-NEXT: [[TMP114:%.*]] = bitcast i8** [[TMP113]] to i32* +// CHECK3-NEXT: store i32 [[TMP59]], i32* [[TMP114]], align 4 +// CHECK3-NEXT: [[TMP115:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 9 +// CHECK3-NEXT: store i8* null, i8** [[TMP115]], align 4 +// CHECK3-NEXT: [[TMP116:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP117:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP118:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140.region_id, i32 10, i8** [[TMP116]], i8** [[TMP117]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) +// CHECK3-NEXT: [[TMP119:%.*]] = icmp ne i32 [[TMP118]], 0 +// CHECK3-NEXT: br i1 [[TMP119]], label [[OMP_OFFLOAD_FAILED17:%.*]], label [[OMP_OFFLOAD_CONT18:%.*]] // CHECK3: omp_offload.failed17: // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140(i32 [[TMP57]], [10 x float]* [[B]], i32 [[TMP1]], float* [[VLA]], [5 x [10 x double]]* [[C]], i32 5, i32 [[TMP3]], double* [[VLA1]], %struct.TT* [[D]], i32 [[TMP59]]) #[[ATTR4]] // CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT18]] @@ -3729,10 +3647,10 @@ // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140(i32 [[TMP57]], [10 x float]* [[B]], i32 [[TMP1]], float* [[VLA]], [5 x [10 x double]]* [[C]], i32 5, i32 [[TMP3]], double* [[VLA1]], %struct.TT* [[D]], i32 [[TMP59]]) #[[ATTR4]] // CHECK3-NEXT: br label [[OMP_IF_END20]] // CHECK3: omp_if.end20: -// CHECK3-NEXT: [[TMP131:%.*]] = load i32, i32* [[A]], align 4 -// CHECK3-NEXT: [[TMP132:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK3-NEXT: call void @llvm.stackrestore(i8* [[TMP132]]) -// CHECK3-NEXT: ret i32 [[TMP131]] +// CHECK3-NEXT: [[TMP120:%.*]] = load i32, i32* [[A]], align 4 +// CHECK3-NEXT: [[TMP121:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK3-NEXT: call void @llvm.stackrestore(i8* [[TMP121]]) +// CHECK3-NEXT: ret i32 [[TMP120]] // // // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96 @@ -4440,7 +4358,6 @@ // CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 4 // CHECK3-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 4 // CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 4 -// CHECK3-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 4 // CHECK3-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 // CHECK3-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 // CHECK3-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 @@ -4470,56 +4387,46 @@ // CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK3-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to double** // 
CHECK3-NEXT: store double* [[A]], double** [[TMP13]], align 4 -// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK3-NEXT: store i64 8, i64* [[TMP14]], align 4 -// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK3-NEXT: store i8* null, i8** [[TMP15]], align 4 -// CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK3-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32* -// CHECK3-NEXT: store i32 [[TMP5]], i32* [[TMP17]], align 4 -// CHECK3-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK3-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32* -// CHECK3-NEXT: store i32 [[TMP5]], i32* [[TMP19]], align 4 -// CHECK3-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK3-NEXT: store i64 4, i64* [[TMP20]], align 4 -// CHECK3-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK3-NEXT: store i8* null, i8** [[TMP21]], align 4 -// CHECK3-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK3-NEXT: store i8* null, i8** [[TMP14]], align 4 +// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK3-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i32* +// CHECK3-NEXT: store i32 [[TMP5]], i32* [[TMP16]], align 4 +// CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK3-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32* +// CHECK3-NEXT: store i32 [[TMP5]], i32* [[TMP18]], align 4 +// CHECK3-NEXT: [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK3-NEXT: store i8* null, i8** [[TMP19]], align 4 +// CHECK3-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK3-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32* +// CHECK3-NEXT: store i32 2, i32* [[TMP21]], align 4 +// CHECK3-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK3-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i32* // CHECK3-NEXT: store i32 2, i32* [[TMP23]], align 4 -// CHECK3-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK3-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i32* -// CHECK3-NEXT: store i32 2, i32* [[TMP25]], align 4 -// CHECK3-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK3-NEXT: store i64 4, i64* [[TMP26]], align 4 -// CHECK3-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK3-NEXT: store i8* null, i8** [[TMP27]], align 4 -// CHECK3-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK3-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i32* -// CHECK3-NEXT: store i32 [[TMP1]], i32* [[TMP29]], align 4 -// CHECK3-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// 
CHECK3-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i32* -// CHECK3-NEXT: store i32 [[TMP1]], i32* [[TMP31]], align 4 -// CHECK3-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK3-NEXT: store i64 4, i64* [[TMP32]], align 4 -// CHECK3-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 -// CHECK3-NEXT: store i8* null, i8** [[TMP33]], align 4 -// CHECK3-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK3-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i16** -// CHECK3-NEXT: store i16* [[VLA]], i16** [[TMP35]], align 4 -// CHECK3-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK3-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i16** -// CHECK3-NEXT: store i16* [[VLA]], i16** [[TMP37]], align 4 -// CHECK3-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK3-NEXT: store i64 [[TMP9]], i64* [[TMP38]], align 4 -// CHECK3-NEXT: [[TMP39:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 -// CHECK3-NEXT: store i8* null, i8** [[TMP39]], align 4 -// CHECK3-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216.region_id, i32 5, i8** [[TMP40]], i8** [[TMP41]], i64* [[TMP42]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) -// CHECK3-NEXT: [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0 -// CHECK3-NEXT: br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK3-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK3-NEXT: store i8* null, i8** [[TMP24]], align 4 +// CHECK3-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK3-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i32* +// CHECK3-NEXT: store i32 [[TMP1]], i32* [[TMP26]], align 4 +// CHECK3-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK3-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i32* +// CHECK3-NEXT: store i32 [[TMP1]], i32* [[TMP28]], align 4 +// CHECK3-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 +// CHECK3-NEXT: store i8* null, i8** [[TMP29]], align 4 +// CHECK3-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK3-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i16** +// CHECK3-NEXT: store i16* [[VLA]], i16** [[TMP31]], align 4 +// CHECK3-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK3-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i16** +// CHECK3-NEXT: store i16* [[VLA]], i16** [[TMP33]], align 4 +// CHECK3-NEXT: store i64 [[TMP9]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* 
@.offload_sizes.11, i32 0, i32 4), align 4 +// CHECK3-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 +// CHECK3-NEXT: store i8* null, i8** [[TMP34]], align 4 +// CHECK3-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP37:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216.region_id, i32 5, i8** [[TMP35]], i8** [[TMP36]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.11, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) +// CHECK3-NEXT: [[TMP38:%.*]] = icmp ne i32 [[TMP37]], 0 +// CHECK3-NEXT: br i1 [[TMP38]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK3: omp_offload.failed: // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216(%struct.S1* [[THIS1]], i32 [[TMP5]], i32 2, i32 [[TMP1]], i16* [[VLA]]) #[[ATTR4]] // CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -4529,15 +4436,15 @@ // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216(%struct.S1* [[THIS1]], i32 [[TMP5]], i32 2, i32 [[TMP1]], i16* [[VLA]]) #[[ATTR4]] // CHECK3-NEXT: br label [[OMP_IF_END]] // CHECK3: omp_if.end: -// CHECK3-NEXT: [[TMP45:%.*]] = mul nsw i32 1, [[TMP1]] -// CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP45]] +// CHECK3-NEXT: [[TMP39:%.*]] = mul nsw i32 1, [[TMP1]] +// CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP39]] // CHECK3-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1 -// CHECK3-NEXT: [[TMP46:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2 -// CHECK3-NEXT: [[CONV:%.*]] = sext i16 [[TMP46]] to i32 -// CHECK3-NEXT: [[TMP47:%.*]] = load i32, i32* [[B]], align 4 -// CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[TMP47]] -// CHECK3-NEXT: [[TMP48:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK3-NEXT: call void @llvm.stackrestore(i8* [[TMP48]]) +// CHECK3-NEXT: [[TMP40:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2 +// CHECK3-NEXT: [[CONV:%.*]] = sext i16 [[TMP40]] to i32 +// CHECK3-NEXT: [[TMP41:%.*]] = load i32, i32* [[B]], align 4 +// CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[TMP41]] +// CHECK3-NEXT: [[TMP42:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK3-NEXT: call void @llvm.stackrestore(i8* [[TMP42]]) // CHECK3-NEXT: ret i32 [[ADD3]] // // @@ -4608,7 +4515,7 @@ // CHECK3-NEXT: store i8* null, i8** [[TMP26]], align 4 // CHECK3-NEXT: [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK3-NEXT: [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) +// CHECK3-NEXT: [[TMP29:%.*]] = call i32 
@__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.14, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.15, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) // CHECK3-NEXT: [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0 // CHECK3-NEXT: br i1 [[TMP30]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK3: omp_offload.failed: @@ -4676,7 +4583,7 @@ // CHECK3-NEXT: store i8* null, i8** [[TMP19]], align 4 // CHECK3-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK3-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.15, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) +// CHECK3-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.17, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.18, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) // CHECK3-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0 // CHECK3-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK3: omp_offload.failed: @@ -4713,11 +4620,11 @@ // CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[B_ADDR]], align 4 // CHECK3-NEXT: store i32 [[TMP4]], i32* [[B_CASTED]], align 4 // CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[B_CASTED]], align 4 -// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], i16* [[TMP3]]) +// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], i16* [[TMP3]]) // CHECK3-NEXT: ret void // // -// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..9 +// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..10 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i32 noundef [[B:%.*]], i32 noundef [[VLA:%.*]], i32 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR3]] { // CHECK3-NEXT: entry: // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -4840,11 +4747,11 @@ // CHECK3-NEXT: [[CONV3:%.*]] = bitcast i32* [[AAA_CASTED]] to i8* // CHECK3-NEXT: store i8 [[TMP5]], i8* [[CONV3]], align 1 // CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[AAA_CASTED]], align 4 -// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, [10 x i32]*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], [10 x i32]* [[TMP0]]) +// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, [10 x i32]*)* @.omp_outlined..13 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], [10 x i32]* [[TMP0]]) // CHECK3-NEXT: ret void // // -// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..13 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], i32 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] { // CHECK3-NEXT: entry: // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -4887,11 +4794,11 @@ // CHECK3-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16* // CHECK3-NEXT: store i16 [[TMP3]], i16* [[CONV1]], align 2 // CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4 -// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]]) +// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]]) // CHECK3-NEXT: ret void // // -// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..14 +// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..16 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] { // CHECK3-NEXT: entry: // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -5024,7 +4931,6 @@ // CHECK4-NEXT: [[DOTOFFLOAD_BASEPTRS14:%.*]] = alloca [10 x i8*], align 4 // CHECK4-NEXT: [[DOTOFFLOAD_PTRS15:%.*]] = alloca [10 x i8*], align 4 // CHECK4-NEXT: [[DOTOFFLOAD_MAPPERS16:%.*]] = alloca [10 x i8*], align 4 -// CHECK4-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [10 x i64], align 4 // CHECK4-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]]) // CHECK4-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 // CHECK4-NEXT: store i32 0, i32* [[A]], align 4 @@ -5156,106 +5062,87 @@ // CHECK4-NEXT: [[TMP68:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 0 // CHECK4-NEXT: [[TMP69:%.*]] = bitcast i8** [[TMP68]] to i32* // CHECK4-NEXT: store i32 [[TMP57]], i32* [[TMP69]], align 4 -// CHECK4-NEXT: [[TMP70:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK4-NEXT: store i64 4, i64* [[TMP70]], align 4 -// CHECK4-NEXT: [[TMP71:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 0 -// CHECK4-NEXT: store i8* null, i8** [[TMP71]], align 4 -// CHECK4-NEXT: [[TMP72:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 1 -// CHECK4-NEXT: [[TMP73:%.*]] = bitcast i8** [[TMP72]] to [10 x float]** -// CHECK4-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP73]], align 4 -// CHECK4-NEXT: [[TMP74:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 1 -// CHECK4-NEXT: [[TMP75:%.*]] = bitcast i8** [[TMP74]] to [10 x float]** -// CHECK4-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP75]], align 4 -// CHECK4-NEXT: [[TMP76:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK4-NEXT: store i64 40, i64* [[TMP76]], align 4 -// CHECK4-NEXT: [[TMP77:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 1 -// CHECK4-NEXT: store i8* null, i8** [[TMP77]], align 4 -// CHECK4-NEXT: [[TMP78:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 2 +// CHECK4-NEXT: [[TMP70:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 0 +// CHECK4-NEXT: store i8* null, i8** [[TMP70]], align 4 +// CHECK4-NEXT: [[TMP71:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 1 +// CHECK4-NEXT: [[TMP72:%.*]] = bitcast i8** [[TMP71]] to [10 x float]** +// CHECK4-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP72]], align 4 +// CHECK4-NEXT: [[TMP73:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 1 +// CHECK4-NEXT: [[TMP74:%.*]] = bitcast i8** [[TMP73]] to [10 x float]** +// CHECK4-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP74]], align 4 +// 
CHECK4-NEXT: [[TMP75:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 1 +// CHECK4-NEXT: store i8* null, i8** [[TMP75]], align 4 +// CHECK4-NEXT: [[TMP76:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 2 +// CHECK4-NEXT: [[TMP77:%.*]] = bitcast i8** [[TMP76]] to i32* +// CHECK4-NEXT: store i32 [[TMP1]], i32* [[TMP77]], align 4 +// CHECK4-NEXT: [[TMP78:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 2 // CHECK4-NEXT: [[TMP79:%.*]] = bitcast i8** [[TMP78]] to i32* // CHECK4-NEXT: store i32 [[TMP1]], i32* [[TMP79]], align 4 -// CHECK4-NEXT: [[TMP80:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 2 -// CHECK4-NEXT: [[TMP81:%.*]] = bitcast i8** [[TMP80]] to i32* -// CHECK4-NEXT: store i32 [[TMP1]], i32* [[TMP81]], align 4 -// CHECK4-NEXT: [[TMP82:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK4-NEXT: store i64 4, i64* [[TMP82]], align 4 -// CHECK4-NEXT: [[TMP83:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 2 -// CHECK4-NEXT: store i8* null, i8** [[TMP83]], align 4 -// CHECK4-NEXT: [[TMP84:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 3 -// CHECK4-NEXT: [[TMP85:%.*]] = bitcast i8** [[TMP84]] to float** -// CHECK4-NEXT: store float* [[VLA]], float** [[TMP85]], align 4 -// CHECK4-NEXT: [[TMP86:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 3 -// CHECK4-NEXT: [[TMP87:%.*]] = bitcast i8** [[TMP86]] to float** -// CHECK4-NEXT: store float* [[VLA]], float** [[TMP87]], align 4 -// CHECK4-NEXT: [[TMP88:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK4-NEXT: store i64 [[TMP62]], i64* [[TMP88]], align 4 -// CHECK4-NEXT: [[TMP89:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 3 -// CHECK4-NEXT: store i8* null, i8** [[TMP89]], align 4 -// CHECK4-NEXT: [[TMP90:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 4 -// CHECK4-NEXT: [[TMP91:%.*]] = bitcast i8** [[TMP90]] to [5 x [10 x double]]** -// CHECK4-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP91]], align 4 -// CHECK4-NEXT: [[TMP92:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 4 -// CHECK4-NEXT: [[TMP93:%.*]] = bitcast i8** [[TMP92]] to [5 x [10 x double]]** -// CHECK4-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP93]], align 4 -// CHECK4-NEXT: [[TMP94:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK4-NEXT: store i64 400, i64* [[TMP94]], align 4 -// CHECK4-NEXT: [[TMP95:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 4 +// CHECK4-NEXT: [[TMP80:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 2 +// CHECK4-NEXT: store i8* null, i8** [[TMP80]], align 4 +// CHECK4-NEXT: [[TMP81:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 3 +// CHECK4-NEXT: [[TMP82:%.*]] = bitcast i8** [[TMP81]] to float** +// CHECK4-NEXT: store float* [[VLA]], float** [[TMP82]], align 4 +// CHECK4-NEXT: [[TMP83:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 3 +// CHECK4-NEXT: [[TMP84:%.*]] = bitcast 
i8** [[TMP83]] to float** +// CHECK4-NEXT: store float* [[VLA]], float** [[TMP84]], align 4 +// CHECK4-NEXT: store i64 [[TMP62]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_sizes.8, i32 0, i32 3), align 4 +// CHECK4-NEXT: [[TMP85:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 3 +// CHECK4-NEXT: store i8* null, i8** [[TMP85]], align 4 +// CHECK4-NEXT: [[TMP86:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 4 +// CHECK4-NEXT: [[TMP87:%.*]] = bitcast i8** [[TMP86]] to [5 x [10 x double]]** +// CHECK4-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP87]], align 4 +// CHECK4-NEXT: [[TMP88:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 4 +// CHECK4-NEXT: [[TMP89:%.*]] = bitcast i8** [[TMP88]] to [5 x [10 x double]]** +// CHECK4-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP89]], align 4 +// CHECK4-NEXT: [[TMP90:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 4 +// CHECK4-NEXT: store i8* null, i8** [[TMP90]], align 4 +// CHECK4-NEXT: [[TMP91:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 5 +// CHECK4-NEXT: [[TMP92:%.*]] = bitcast i8** [[TMP91]] to i32* +// CHECK4-NEXT: store i32 5, i32* [[TMP92]], align 4 +// CHECK4-NEXT: [[TMP93:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 5 +// CHECK4-NEXT: [[TMP94:%.*]] = bitcast i8** [[TMP93]] to i32* +// CHECK4-NEXT: store i32 5, i32* [[TMP94]], align 4 +// CHECK4-NEXT: [[TMP95:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 5 // CHECK4-NEXT: store i8* null, i8** [[TMP95]], align 4 -// CHECK4-NEXT: [[TMP96:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 5 +// CHECK4-NEXT: [[TMP96:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 6 // CHECK4-NEXT: [[TMP97:%.*]] = bitcast i8** [[TMP96]] to i32* -// CHECK4-NEXT: store i32 5, i32* [[TMP97]], align 4 -// CHECK4-NEXT: [[TMP98:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 5 +// CHECK4-NEXT: store i32 [[TMP3]], i32* [[TMP97]], align 4 +// CHECK4-NEXT: [[TMP98:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 6 // CHECK4-NEXT: [[TMP99:%.*]] = bitcast i8** [[TMP98]] to i32* -// CHECK4-NEXT: store i32 5, i32* [[TMP99]], align 4 -// CHECK4-NEXT: [[TMP100:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5 -// CHECK4-NEXT: store i64 4, i64* [[TMP100]], align 4 -// CHECK4-NEXT: [[TMP101:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 5 -// CHECK4-NEXT: store i8* null, i8** [[TMP101]], align 4 -// CHECK4-NEXT: [[TMP102:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 6 -// CHECK4-NEXT: [[TMP103:%.*]] = bitcast i8** [[TMP102]] to i32* -// CHECK4-NEXT: store i32 [[TMP3]], i32* [[TMP103]], align 4 -// CHECK4-NEXT: [[TMP104:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 6 -// CHECK4-NEXT: [[TMP105:%.*]] = bitcast i8** [[TMP104]] to i32* -// CHECK4-NEXT: store i32 [[TMP3]], i32* [[TMP105]], align 4 -// CHECK4-NEXT: [[TMP106:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6 -// CHECK4-NEXT: store 
i64 4, i64* [[TMP106]], align 4 -// CHECK4-NEXT: [[TMP107:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 6 -// CHECK4-NEXT: store i8* null, i8** [[TMP107]], align 4 -// CHECK4-NEXT: [[TMP108:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 7 -// CHECK4-NEXT: [[TMP109:%.*]] = bitcast i8** [[TMP108]] to double** -// CHECK4-NEXT: store double* [[VLA1]], double** [[TMP109]], align 4 -// CHECK4-NEXT: [[TMP110:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 7 -// CHECK4-NEXT: [[TMP111:%.*]] = bitcast i8** [[TMP110]] to double** -// CHECK4-NEXT: store double* [[VLA1]], double** [[TMP111]], align 4 -// CHECK4-NEXT: [[TMP112:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7 -// CHECK4-NEXT: store i64 [[TMP65]], i64* [[TMP112]], align 4 -// CHECK4-NEXT: [[TMP113:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 7 -// CHECK4-NEXT: store i8* null, i8** [[TMP113]], align 4 -// CHECK4-NEXT: [[TMP114:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 8 -// CHECK4-NEXT: [[TMP115:%.*]] = bitcast i8** [[TMP114]] to %struct.TT** -// CHECK4-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP115]], align 4 -// CHECK4-NEXT: [[TMP116:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 8 -// CHECK4-NEXT: [[TMP117:%.*]] = bitcast i8** [[TMP116]] to %struct.TT** -// CHECK4-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP117]], align 4 -// CHECK4-NEXT: [[TMP118:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 8 -// CHECK4-NEXT: store i64 12, i64* [[TMP118]], align 4 -// CHECK4-NEXT: [[TMP119:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 8 -// CHECK4-NEXT: store i8* null, i8** [[TMP119]], align 4 -// CHECK4-NEXT: [[TMP120:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 9 -// CHECK4-NEXT: [[TMP121:%.*]] = bitcast i8** [[TMP120]] to i32* -// CHECK4-NEXT: store i32 [[TMP59]], i32* [[TMP121]], align 4 -// CHECK4-NEXT: [[TMP122:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 9 -// CHECK4-NEXT: [[TMP123:%.*]] = bitcast i8** [[TMP122]] to i32* -// CHECK4-NEXT: store i32 [[TMP59]], i32* [[TMP123]], align 4 -// CHECK4-NEXT: [[TMP124:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 9 -// CHECK4-NEXT: store i64 4, i64* [[TMP124]], align 4 -// CHECK4-NEXT: [[TMP125:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 9 -// CHECK4-NEXT: store i8* null, i8** [[TMP125]], align 4 -// CHECK4-NEXT: [[TMP126:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 0 -// CHECK4-NEXT: [[TMP127:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 0 -// CHECK4-NEXT: [[TMP128:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK4-NEXT: [[TMP129:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140.region_id, i32 10, i8** [[TMP126]], i8** [[TMP127]], i64* [[TMP128]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_maptypes.8, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) -// CHECK4-NEXT: 
[[TMP130:%.*]] = icmp ne i32 [[TMP129]], 0 -// CHECK4-NEXT: br i1 [[TMP130]], label [[OMP_OFFLOAD_FAILED17:%.*]], label [[OMP_OFFLOAD_CONT18:%.*]] +// CHECK4-NEXT: store i32 [[TMP3]], i32* [[TMP99]], align 4 +// CHECK4-NEXT: [[TMP100:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 6 +// CHECK4-NEXT: store i8* null, i8** [[TMP100]], align 4 +// CHECK4-NEXT: [[TMP101:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 7 +// CHECK4-NEXT: [[TMP102:%.*]] = bitcast i8** [[TMP101]] to double** +// CHECK4-NEXT: store double* [[VLA1]], double** [[TMP102]], align 4 +// CHECK4-NEXT: [[TMP103:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 7 +// CHECK4-NEXT: [[TMP104:%.*]] = bitcast i8** [[TMP103]] to double** +// CHECK4-NEXT: store double* [[VLA1]], double** [[TMP104]], align 4 +// CHECK4-NEXT: store i64 [[TMP65]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_sizes.8, i32 0, i32 7), align 4 +// CHECK4-NEXT: [[TMP105:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 7 +// CHECK4-NEXT: store i8* null, i8** [[TMP105]], align 4 +// CHECK4-NEXT: [[TMP106:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 8 +// CHECK4-NEXT: [[TMP107:%.*]] = bitcast i8** [[TMP106]] to %struct.TT** +// CHECK4-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP107]], align 4 +// CHECK4-NEXT: [[TMP108:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 8 +// CHECK4-NEXT: [[TMP109:%.*]] = bitcast i8** [[TMP108]] to %struct.TT** +// CHECK4-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP109]], align 4 +// CHECK4-NEXT: [[TMP110:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 8 +// CHECK4-NEXT: store i8* null, i8** [[TMP110]], align 4 +// CHECK4-NEXT: [[TMP111:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 9 +// CHECK4-NEXT: [[TMP112:%.*]] = bitcast i8** [[TMP111]] to i32* +// CHECK4-NEXT: store i32 [[TMP59]], i32* [[TMP112]], align 4 +// CHECK4-NEXT: [[TMP113:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 9 +// CHECK4-NEXT: [[TMP114:%.*]] = bitcast i8** [[TMP113]] to i32* +// CHECK4-NEXT: store i32 [[TMP59]], i32* [[TMP114]], align 4 +// CHECK4-NEXT: [[TMP115:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 9 +// CHECK4-NEXT: store i8* null, i8** [[TMP115]], align 4 +// CHECK4-NEXT: [[TMP116:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 0 +// CHECK4-NEXT: [[TMP117:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 0 +// CHECK4-NEXT: [[TMP118:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140.region_id, i32 10, i8** [[TMP116]], i8** [[TMP117]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) +// CHECK4-NEXT: [[TMP119:%.*]] = icmp ne i32 [[TMP118]], 0 +// CHECK4-NEXT: br i1 [[TMP119]], label [[OMP_OFFLOAD_FAILED17:%.*]], label [[OMP_OFFLOAD_CONT18:%.*]] // CHECK4: omp_offload.failed17: // CHECK4-NEXT: call void 
@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140(i32 [[TMP57]], [10 x float]* [[B]], i32 [[TMP1]], float* [[VLA]], [5 x [10 x double]]* [[C]], i32 5, i32 [[TMP3]], double* [[VLA1]], %struct.TT* [[D]], i32 [[TMP59]]) #[[ATTR4]] // CHECK4-NEXT: br label [[OMP_OFFLOAD_CONT18]] @@ -5265,10 +5152,10 @@ // CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140(i32 [[TMP57]], [10 x float]* [[B]], i32 [[TMP1]], float* [[VLA]], [5 x [10 x double]]* [[C]], i32 5, i32 [[TMP3]], double* [[VLA1]], %struct.TT* [[D]], i32 [[TMP59]]) #[[ATTR4]] // CHECK4-NEXT: br label [[OMP_IF_END20]] // CHECK4: omp_if.end20: -// CHECK4-NEXT: [[TMP131:%.*]] = load i32, i32* [[A]], align 4 -// CHECK4-NEXT: [[TMP132:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK4-NEXT: call void @llvm.stackrestore(i8* [[TMP132]]) -// CHECK4-NEXT: ret i32 [[TMP131]] +// CHECK4-NEXT: [[TMP120:%.*]] = load i32, i32* [[A]], align 4 +// CHECK4-NEXT: [[TMP121:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK4-NEXT: call void @llvm.stackrestore(i8* [[TMP121]]) +// CHECK4-NEXT: ret i32 [[TMP120]] // // // CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96 @@ -5976,7 +5863,6 @@ // CHECK4-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 4 // CHECK4-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 4 // CHECK4-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 4 -// CHECK4-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 4 // CHECK4-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 // CHECK4-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 // CHECK4-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 @@ -6006,56 +5892,46 @@ // CHECK4-NEXT: [[TMP12:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK4-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to double** // CHECK4-NEXT: store double* [[A]], double** [[TMP13]], align 4 -// CHECK4-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK4-NEXT: store i64 8, i64* [[TMP14]], align 4 -// CHECK4-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK4-NEXT: store i8* null, i8** [[TMP15]], align 4 -// CHECK4-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK4-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32* -// CHECK4-NEXT: store i32 [[TMP5]], i32* [[TMP17]], align 4 -// CHECK4-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK4-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32* -// CHECK4-NEXT: store i32 [[TMP5]], i32* [[TMP19]], align 4 -// CHECK4-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK4-NEXT: store i64 4, i64* [[TMP20]], align 4 -// CHECK4-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK4-NEXT: store i8* null, i8** [[TMP21]], align 4 -// CHECK4-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK4-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK4-NEXT: store i8* null, i8** [[TMP14]], align 4 +// CHECK4-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// 
CHECK4-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i32* +// CHECK4-NEXT: store i32 [[TMP5]], i32* [[TMP16]], align 4 +// CHECK4-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK4-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32* +// CHECK4-NEXT: store i32 [[TMP5]], i32* [[TMP18]], align 4 +// CHECK4-NEXT: [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK4-NEXT: store i8* null, i8** [[TMP19]], align 4 +// CHECK4-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK4-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32* +// CHECK4-NEXT: store i32 2, i32* [[TMP21]], align 4 +// CHECK4-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK4-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i32* // CHECK4-NEXT: store i32 2, i32* [[TMP23]], align 4 -// CHECK4-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK4-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i32* -// CHECK4-NEXT: store i32 2, i32* [[TMP25]], align 4 -// CHECK4-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK4-NEXT: store i64 4, i64* [[TMP26]], align 4 -// CHECK4-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK4-NEXT: store i8* null, i8** [[TMP27]], align 4 -// CHECK4-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK4-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i32* -// CHECK4-NEXT: store i32 [[TMP1]], i32* [[TMP29]], align 4 -// CHECK4-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK4-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i32* -// CHECK4-NEXT: store i32 [[TMP1]], i32* [[TMP31]], align 4 -// CHECK4-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK4-NEXT: store i64 4, i64* [[TMP32]], align 4 -// CHECK4-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 -// CHECK4-NEXT: store i8* null, i8** [[TMP33]], align 4 -// CHECK4-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK4-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i16** -// CHECK4-NEXT: store i16* [[VLA]], i16** [[TMP35]], align 4 -// CHECK4-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK4-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i16** -// CHECK4-NEXT: store i16* [[VLA]], i16** [[TMP37]], align 4 -// CHECK4-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK4-NEXT: store i64 [[TMP9]], i64* [[TMP38]], align 4 -// CHECK4-NEXT: [[TMP39:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 -// CHECK4-NEXT: store i8* null, i8** [[TMP39]], align 4 -// CHECK4-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK4-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK4-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// 
CHECK4-NEXT: [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216.region_id, i32 5, i8** [[TMP40]], i8** [[TMP41]], i64* [[TMP42]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) -// CHECK4-NEXT: [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0 -// CHECK4-NEXT: br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK4-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK4-NEXT: store i8* null, i8** [[TMP24]], align 4 +// CHECK4-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK4-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i32* +// CHECK4-NEXT: store i32 [[TMP1]], i32* [[TMP26]], align 4 +// CHECK4-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK4-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i32* +// CHECK4-NEXT: store i32 [[TMP1]], i32* [[TMP28]], align 4 +// CHECK4-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 +// CHECK4-NEXT: store i8* null, i8** [[TMP29]], align 4 +// CHECK4-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK4-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i16** +// CHECK4-NEXT: store i16* [[VLA]], i16** [[TMP31]], align 4 +// CHECK4-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK4-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i16** +// CHECK4-NEXT: store i16* [[VLA]], i16** [[TMP33]], align 4 +// CHECK4-NEXT: store i64 [[TMP9]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.11, i32 0, i32 4), align 4 +// CHECK4-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 +// CHECK4-NEXT: store i8* null, i8** [[TMP34]], align 4 +// CHECK4-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK4-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK4-NEXT: [[TMP37:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216.region_id, i32 5, i8** [[TMP35]], i8** [[TMP36]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.11, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) +// CHECK4-NEXT: [[TMP38:%.*]] = icmp ne i32 [[TMP37]], 0 +// CHECK4-NEXT: br i1 [[TMP38]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK4: omp_offload.failed: // CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216(%struct.S1* [[THIS1]], i32 [[TMP5]], i32 2, i32 [[TMP1]], i16* [[VLA]]) #[[ATTR4]] // CHECK4-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -6065,15 +5941,15 @@ // CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l216(%struct.S1* [[THIS1]], i32 [[TMP5]], i32 2, i32 [[TMP1]], i16* [[VLA]]) #[[ATTR4]] // CHECK4-NEXT: br label [[OMP_IF_END]] // CHECK4: omp_if.end: -// CHECK4-NEXT: [[TMP45:%.*]] = mul nsw i32 1, [[TMP1]] -// CHECK4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds 
i16, i16* [[VLA]], i32 [[TMP45]] +// CHECK4-NEXT: [[TMP39:%.*]] = mul nsw i32 1, [[TMP1]] +// CHECK4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP39]] // CHECK4-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1 -// CHECK4-NEXT: [[TMP46:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2 -// CHECK4-NEXT: [[CONV:%.*]] = sext i16 [[TMP46]] to i32 -// CHECK4-NEXT: [[TMP47:%.*]] = load i32, i32* [[B]], align 4 -// CHECK4-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[TMP47]] -// CHECK4-NEXT: [[TMP48:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK4-NEXT: call void @llvm.stackrestore(i8* [[TMP48]]) +// CHECK4-NEXT: [[TMP40:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2 +// CHECK4-NEXT: [[CONV:%.*]] = sext i16 [[TMP40]] to i32 +// CHECK4-NEXT: [[TMP41:%.*]] = load i32, i32* [[B]], align 4 +// CHECK4-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[TMP41]] +// CHECK4-NEXT: [[TMP42:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK4-NEXT: call void @llvm.stackrestore(i8* [[TMP42]]) // CHECK4-NEXT: ret i32 [[ADD3]] // // @@ -6144,7 +6020,7 @@ // CHECK4-NEXT: store i8* null, i8** [[TMP26]], align 4 // CHECK4-NEXT: [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK4-NEXT: [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK4-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) +// CHECK4-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.14, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.15, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) // CHECK4-NEXT: [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0 // CHECK4-NEXT: br i1 [[TMP30]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK4: omp_offload.failed: @@ -6212,7 +6088,7 @@ // CHECK4-NEXT: store i8* null, i8** [[TMP19]], align 4 // CHECK4-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK4-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK4-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.15, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) +// CHECK4-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.17, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* 
@.offload_maptypes.18, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) // CHECK4-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0 // CHECK4-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK4: omp_offload.failed: @@ -6249,11 +6125,11 @@ // CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[B_ADDR]], align 4 // CHECK4-NEXT: store i32 [[TMP4]], i32* [[B_CASTED]], align 4 // CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[B_CASTED]], align 4 -// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], i16* [[TMP3]]) +// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], i16* [[TMP3]]) // CHECK4-NEXT: ret void // // -// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..9 +// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..10 // CHECK4-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i32 noundef [[B:%.*]], i32 noundef [[VLA:%.*]], i32 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR3]] { // CHECK4-NEXT: entry: // CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -6376,11 +6252,11 @@ // CHECK4-NEXT: [[CONV3:%.*]] = bitcast i32* [[AAA_CASTED]] to i8* // CHECK4-NEXT: store i8 [[TMP5]], i8* [[CONV3]], align 1 // CHECK4-NEXT: [[TMP6:%.*]] = load i32, i32* [[AAA_CASTED]], align 4 -// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, [10 x i32]*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], [10 x i32]* [[TMP0]]) +// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, [10 x i32]*)* @.omp_outlined..13 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], [10 x i32]* [[TMP0]]) // CHECK4-NEXT: ret void // // -// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..13 // CHECK4-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], i32 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] { // CHECK4-NEXT: entry: // CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -6423,11 +6299,11 @@ // CHECK4-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16* // CHECK4-NEXT: store i16 [[TMP3]], i16* [[CONV1]], align 2 // CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4 -// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]]) +// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]]) // CHECK4-NEXT: ret void // // -// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..14 +// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..16 // CHECK4-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] { // CHECK4-NEXT: entry: // CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -6561,7 +6437,6 @@ // CHECK5-NEXT: [[DOTOFFLOAD_BASEPTRS20:%.*]] = alloca [10 x i8*], align 8 // CHECK5-NEXT: [[DOTOFFLOAD_PTRS21:%.*]] = alloca [10 x i8*], align 8 // CHECK5-NEXT: [[DOTOFFLOAD_MAPPERS22:%.*]] = alloca [10 x i8*], align 8 -// CHECK5-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [10 x i64], align 8 // CHECK5-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]]) // CHECK5-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 // CHECK5-NEXT: store i32 0, i32* [[A]], align 4 @@ -6702,106 +6577,87 @@ // CHECK5-NEXT: [[TMP70:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0 // CHECK5-NEXT: [[TMP71:%.*]] = bitcast i8** [[TMP70]] to i64* // CHECK5-NEXT: store i64 [[TMP61]], i64* [[TMP71]], align 8 -// CHECK5-NEXT: [[TMP72:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK5-NEXT: store i64 4, i64* [[TMP72]], align 8 -// CHECK5-NEXT: [[TMP73:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 0 -// CHECK5-NEXT: store i8* null, i8** [[TMP73]], align 8 -// CHECK5-NEXT: [[TMP74:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 1 -// CHECK5-NEXT: [[TMP75:%.*]] = bitcast i8** [[TMP74]] to [10 x float]** -// CHECK5-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP75]], align 8 -// CHECK5-NEXT: [[TMP76:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 1 -// CHECK5-NEXT: [[TMP77:%.*]] = bitcast i8** [[TMP76]] to [10 x float]** -// CHECK5-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP77]], align 8 -// CHECK5-NEXT: [[TMP78:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK5-NEXT: store i64 40, i64* [[TMP78]], align 8 -// CHECK5-NEXT: [[TMP79:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 1 -// CHECK5-NEXT: store i8* null, i8** [[TMP79]], align 8 -// CHECK5-NEXT: [[TMP80:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 2 +// CHECK5-NEXT: [[TMP72:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 0 +// CHECK5-NEXT: store i8* null, i8** [[TMP72]], align 8 +// CHECK5-NEXT: [[TMP73:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 1 +// CHECK5-NEXT: [[TMP74:%.*]] = bitcast i8** [[TMP73]] to [10 x float]** +// CHECK5-NEXT: store [10 x float]* [[B]], [10 x 
float]** [[TMP74]], align 8 +// CHECK5-NEXT: [[TMP75:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 1 +// CHECK5-NEXT: [[TMP76:%.*]] = bitcast i8** [[TMP75]] to [10 x float]** +// CHECK5-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP76]], align 8 +// CHECK5-NEXT: [[TMP77:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 1 +// CHECK5-NEXT: store i8* null, i8** [[TMP77]], align 8 +// CHECK5-NEXT: [[TMP78:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 2 +// CHECK5-NEXT: [[TMP79:%.*]] = bitcast i8** [[TMP78]] to i64* +// CHECK5-NEXT: store i64 [[TMP2]], i64* [[TMP79]], align 8 +// CHECK5-NEXT: [[TMP80:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 2 // CHECK5-NEXT: [[TMP81:%.*]] = bitcast i8** [[TMP80]] to i64* // CHECK5-NEXT: store i64 [[TMP2]], i64* [[TMP81]], align 8 -// CHECK5-NEXT: [[TMP82:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 2 -// CHECK5-NEXT: [[TMP83:%.*]] = bitcast i8** [[TMP82]] to i64* -// CHECK5-NEXT: store i64 [[TMP2]], i64* [[TMP83]], align 8 -// CHECK5-NEXT: [[TMP84:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK5-NEXT: store i64 8, i64* [[TMP84]], align 8 -// CHECK5-NEXT: [[TMP85:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 2 -// CHECK5-NEXT: store i8* null, i8** [[TMP85]], align 8 -// CHECK5-NEXT: [[TMP86:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 3 -// CHECK5-NEXT: [[TMP87:%.*]] = bitcast i8** [[TMP86]] to float** -// CHECK5-NEXT: store float* [[VLA]], float** [[TMP87]], align 8 -// CHECK5-NEXT: [[TMP88:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 3 -// CHECK5-NEXT: [[TMP89:%.*]] = bitcast i8** [[TMP88]] to float** -// CHECK5-NEXT: store float* [[VLA]], float** [[TMP89]], align 8 -// CHECK5-NEXT: [[TMP90:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK5-NEXT: store i64 [[TMP65]], i64* [[TMP90]], align 8 -// CHECK5-NEXT: [[TMP91:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 3 -// CHECK5-NEXT: store i8* null, i8** [[TMP91]], align 8 -// CHECK5-NEXT: [[TMP92:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 4 -// CHECK5-NEXT: [[TMP93:%.*]] = bitcast i8** [[TMP92]] to [5 x [10 x double]]** -// CHECK5-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP93]], align 8 -// CHECK5-NEXT: [[TMP94:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 4 -// CHECK5-NEXT: [[TMP95:%.*]] = bitcast i8** [[TMP94]] to [5 x [10 x double]]** -// CHECK5-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP95]], align 8 -// CHECK5-NEXT: [[TMP96:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK5-NEXT: store i64 400, i64* [[TMP96]], align 8 -// CHECK5-NEXT: [[TMP97:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 4 +// CHECK5-NEXT: [[TMP82:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 2 +// CHECK5-NEXT: store i8* null, i8** [[TMP82]], align 8 +// CHECK5-NEXT: [[TMP83:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], 
i32 0, i32 3 +// CHECK5-NEXT: [[TMP84:%.*]] = bitcast i8** [[TMP83]] to float** +// CHECK5-NEXT: store float* [[VLA]], float** [[TMP84]], align 8 +// CHECK5-NEXT: [[TMP85:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 3 +// CHECK5-NEXT: [[TMP86:%.*]] = bitcast i8** [[TMP85]] to float** +// CHECK5-NEXT: store float* [[VLA]], float** [[TMP86]], align 8 +// CHECK5-NEXT: store i64 [[TMP65]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_sizes.8, i32 0, i32 3), align 8 +// CHECK5-NEXT: [[TMP87:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 3 +// CHECK5-NEXT: store i8* null, i8** [[TMP87]], align 8 +// CHECK5-NEXT: [[TMP88:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 4 +// CHECK5-NEXT: [[TMP89:%.*]] = bitcast i8** [[TMP88]] to [5 x [10 x double]]** +// CHECK5-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP89]], align 8 +// CHECK5-NEXT: [[TMP90:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 4 +// CHECK5-NEXT: [[TMP91:%.*]] = bitcast i8** [[TMP90]] to [5 x [10 x double]]** +// CHECK5-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP91]], align 8 +// CHECK5-NEXT: [[TMP92:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 4 +// CHECK5-NEXT: store i8* null, i8** [[TMP92]], align 8 +// CHECK5-NEXT: [[TMP93:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 5 +// CHECK5-NEXT: [[TMP94:%.*]] = bitcast i8** [[TMP93]] to i64* +// CHECK5-NEXT: store i64 5, i64* [[TMP94]], align 8 +// CHECK5-NEXT: [[TMP95:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 5 +// CHECK5-NEXT: [[TMP96:%.*]] = bitcast i8** [[TMP95]] to i64* +// CHECK5-NEXT: store i64 5, i64* [[TMP96]], align 8 +// CHECK5-NEXT: [[TMP97:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 5 // CHECK5-NEXT: store i8* null, i8** [[TMP97]], align 8 -// CHECK5-NEXT: [[TMP98:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 5 +// CHECK5-NEXT: [[TMP98:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 6 // CHECK5-NEXT: [[TMP99:%.*]] = bitcast i8** [[TMP98]] to i64* -// CHECK5-NEXT: store i64 5, i64* [[TMP99]], align 8 -// CHECK5-NEXT: [[TMP100:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 5 +// CHECK5-NEXT: store i64 [[TMP5]], i64* [[TMP99]], align 8 +// CHECK5-NEXT: [[TMP100:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 6 // CHECK5-NEXT: [[TMP101:%.*]] = bitcast i8** [[TMP100]] to i64* -// CHECK5-NEXT: store i64 5, i64* [[TMP101]], align 8 -// CHECK5-NEXT: [[TMP102:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5 -// CHECK5-NEXT: store i64 8, i64* [[TMP102]], align 8 -// CHECK5-NEXT: [[TMP103:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 5 -// CHECK5-NEXT: store i8* null, i8** [[TMP103]], align 8 -// CHECK5-NEXT: [[TMP104:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 6 -// CHECK5-NEXT: [[TMP105:%.*]] = bitcast i8** [[TMP104]] to i64* -// CHECK5-NEXT: store i64 [[TMP5]], i64* [[TMP105]], align 8 -// CHECK5-NEXT: [[TMP106:%.*]] = getelementptr inbounds [10 x i8*], [10 x 
i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 6 -// CHECK5-NEXT: [[TMP107:%.*]] = bitcast i8** [[TMP106]] to i64* -// CHECK5-NEXT: store i64 [[TMP5]], i64* [[TMP107]], align 8 -// CHECK5-NEXT: [[TMP108:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6 -// CHECK5-NEXT: store i64 8, i64* [[TMP108]], align 8 -// CHECK5-NEXT: [[TMP109:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 6 -// CHECK5-NEXT: store i8* null, i8** [[TMP109]], align 8 -// CHECK5-NEXT: [[TMP110:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 7 -// CHECK5-NEXT: [[TMP111:%.*]] = bitcast i8** [[TMP110]] to double** -// CHECK5-NEXT: store double* [[VLA1]], double** [[TMP111]], align 8 -// CHECK5-NEXT: [[TMP112:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 7 -// CHECK5-NEXT: [[TMP113:%.*]] = bitcast i8** [[TMP112]] to double** -// CHECK5-NEXT: store double* [[VLA1]], double** [[TMP113]], align 8 -// CHECK5-NEXT: [[TMP114:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7 -// CHECK5-NEXT: store i64 [[TMP67]], i64* [[TMP114]], align 8 -// CHECK5-NEXT: [[TMP115:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 7 -// CHECK5-NEXT: store i8* null, i8** [[TMP115]], align 8 -// CHECK5-NEXT: [[TMP116:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 8 -// CHECK5-NEXT: [[TMP117:%.*]] = bitcast i8** [[TMP116]] to %struct.TT** -// CHECK5-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP117]], align 8 -// CHECK5-NEXT: [[TMP118:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 8 -// CHECK5-NEXT: [[TMP119:%.*]] = bitcast i8** [[TMP118]] to %struct.TT** -// CHECK5-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP119]], align 8 -// CHECK5-NEXT: [[TMP120:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 8 -// CHECK5-NEXT: store i64 16, i64* [[TMP120]], align 8 -// CHECK5-NEXT: [[TMP121:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 8 -// CHECK5-NEXT: store i8* null, i8** [[TMP121]], align 8 -// CHECK5-NEXT: [[TMP122:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 9 -// CHECK5-NEXT: [[TMP123:%.*]] = bitcast i8** [[TMP122]] to i64* -// CHECK5-NEXT: store i64 [[TMP63]], i64* [[TMP123]], align 8 -// CHECK5-NEXT: [[TMP124:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 9 -// CHECK5-NEXT: [[TMP125:%.*]] = bitcast i8** [[TMP124]] to i64* -// CHECK5-NEXT: store i64 [[TMP63]], i64* [[TMP125]], align 8 -// CHECK5-NEXT: [[TMP126:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 9 -// CHECK5-NEXT: store i64 4, i64* [[TMP126]], align 8 -// CHECK5-NEXT: [[TMP127:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 9 -// CHECK5-NEXT: store i8* null, i8** [[TMP127]], align 8 -// CHECK5-NEXT: [[TMP128:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0 -// CHECK5-NEXT: [[TMP129:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0 -// CHECK5-NEXT: [[TMP130:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK5-NEXT: [[TMP131:%.*]] = call i32 
@__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140.region_id, i32 10, i8** [[TMP128]], i8** [[TMP129]], i64* [[TMP130]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_maptypes.8, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) -// CHECK5-NEXT: [[TMP132:%.*]] = icmp ne i32 [[TMP131]], 0 -// CHECK5-NEXT: br i1 [[TMP132]], label [[OMP_OFFLOAD_FAILED23:%.*]], label [[OMP_OFFLOAD_CONT24:%.*]] +// CHECK5-NEXT: store i64 [[TMP5]], i64* [[TMP101]], align 8 +// CHECK5-NEXT: [[TMP102:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 6 +// CHECK5-NEXT: store i8* null, i8** [[TMP102]], align 8 +// CHECK5-NEXT: [[TMP103:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 7 +// CHECK5-NEXT: [[TMP104:%.*]] = bitcast i8** [[TMP103]] to double** +// CHECK5-NEXT: store double* [[VLA1]], double** [[TMP104]], align 8 +// CHECK5-NEXT: [[TMP105:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 7 +// CHECK5-NEXT: [[TMP106:%.*]] = bitcast i8** [[TMP105]] to double** +// CHECK5-NEXT: store double* [[VLA1]], double** [[TMP106]], align 8 +// CHECK5-NEXT: store i64 [[TMP67]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_sizes.8, i32 0, i32 7), align 8 +// CHECK5-NEXT: [[TMP107:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 7 +// CHECK5-NEXT: store i8* null, i8** [[TMP107]], align 8 +// CHECK5-NEXT: [[TMP108:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 8 +// CHECK5-NEXT: [[TMP109:%.*]] = bitcast i8** [[TMP108]] to %struct.TT** +// CHECK5-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP109]], align 8 +// CHECK5-NEXT: [[TMP110:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 8 +// CHECK5-NEXT: [[TMP111:%.*]] = bitcast i8** [[TMP110]] to %struct.TT** +// CHECK5-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP111]], align 8 +// CHECK5-NEXT: [[TMP112:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 8 +// CHECK5-NEXT: store i8* null, i8** [[TMP112]], align 8 +// CHECK5-NEXT: [[TMP113:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 9 +// CHECK5-NEXT: [[TMP114:%.*]] = bitcast i8** [[TMP113]] to i64* +// CHECK5-NEXT: store i64 [[TMP63]], i64* [[TMP114]], align 8 +// CHECK5-NEXT: [[TMP115:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 9 +// CHECK5-NEXT: [[TMP116:%.*]] = bitcast i8** [[TMP115]] to i64* +// CHECK5-NEXT: store i64 [[TMP63]], i64* [[TMP116]], align 8 +// CHECK5-NEXT: [[TMP117:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 9 +// CHECK5-NEXT: store i8* null, i8** [[TMP117]], align 8 +// CHECK5-NEXT: [[TMP118:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0 +// CHECK5-NEXT: [[TMP119:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0 +// CHECK5-NEXT: [[TMP120:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140.region_id, i32 10, i8** [[TMP118]], i8** [[TMP119]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([10 x i64], [10 x i64]* 
@.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) +// CHECK5-NEXT: [[TMP121:%.*]] = icmp ne i32 [[TMP120]], 0 +// CHECK5-NEXT: br i1 [[TMP121]], label [[OMP_OFFLOAD_FAILED23:%.*]], label [[OMP_OFFLOAD_CONT24:%.*]] // CHECK5: omp_offload.failed23: // CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140(i64 [[TMP61]], [10 x float]* [[B]], i64 [[TMP2]], float* [[VLA]], [5 x [10 x double]]* [[C]], i64 5, i64 [[TMP5]], double* [[VLA1]], %struct.TT* [[D]], i64 [[TMP63]]) #[[ATTR4]] // CHECK5-NEXT: br label [[OMP_OFFLOAD_CONT24]] @@ -6811,10 +6667,10 @@ // CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140(i64 [[TMP61]], [10 x float]* [[B]], i64 [[TMP2]], float* [[VLA]], [5 x [10 x double]]* [[C]], i64 5, i64 [[TMP5]], double* [[VLA1]], %struct.TT* [[D]], i64 [[TMP63]]) #[[ATTR4]] // CHECK5-NEXT: br label [[OMP_IF_END26]] // CHECK5: omp_if.end26: -// CHECK5-NEXT: [[TMP133:%.*]] = load i32, i32* [[A]], align 4 -// CHECK5-NEXT: [[TMP134:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK5-NEXT: call void @llvm.stackrestore(i8* [[TMP134]]) -// CHECK5-NEXT: ret i32 [[TMP133]] +// CHECK5-NEXT: [[TMP122:%.*]] = load i32, i32* [[A]], align 4 +// CHECK5-NEXT: [[TMP123:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK5-NEXT: call void @llvm.stackrestore(i8* [[TMP123]]) +// CHECK5-NEXT: ret i32 [[TMP122]] // // // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96 @@ -7544,7 +7400,6 @@ // CHECK5-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [6 x i8*], align 8 // CHECK5-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [6 x i8*], align 8 // CHECK5-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [6 x i8*], align 8 -// CHECK5-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [6 x i64], align 8 // CHECK5-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 // CHECK5-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 // CHECK5-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8 @@ -7585,69 +7440,57 @@ // CHECK5-NEXT: [[TMP15:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK5-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to double** // CHECK5-NEXT: store double* [[A]], double** [[TMP16]], align 8 -// CHECK5-NEXT: [[TMP17:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK5-NEXT: store i64 8, i64* [[TMP17]], align 8 -// CHECK5-NEXT: [[TMP18:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK5-NEXT: store i8* null, i8** [[TMP18]], align 8 -// CHECK5-NEXT: [[TMP19:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK5-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i64* -// CHECK5-NEXT: store i64 [[TMP7]], i64* [[TMP20]], align 8 -// CHECK5-NEXT: [[TMP21:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK5-NEXT: [[TMP22:%.*]] = bitcast i8** [[TMP21]] to i64* -// CHECK5-NEXT: store i64 [[TMP7]], i64* [[TMP22]], align 8 -// CHECK5-NEXT: [[TMP23:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK5-NEXT: store i64 4, i64* [[TMP23]], align 8 -// CHECK5-NEXT: [[TMP24:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK5-NEXT: store i8* null, i8** [[TMP24]], align 8 -// CHECK5-NEXT: [[TMP25:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, 
i32 2 +// CHECK5-NEXT: [[TMP17:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK5-NEXT: store i8* null, i8** [[TMP17]], align 8 +// CHECK5-NEXT: [[TMP18:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK5-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i64* +// CHECK5-NEXT: store i64 [[TMP7]], i64* [[TMP19]], align 8 +// CHECK5-NEXT: [[TMP20:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK5-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i64* +// CHECK5-NEXT: store i64 [[TMP7]], i64* [[TMP21]], align 8 +// CHECK5-NEXT: [[TMP22:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK5-NEXT: store i8* null, i8** [[TMP22]], align 8 +// CHECK5-NEXT: [[TMP23:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK5-NEXT: [[TMP24:%.*]] = bitcast i8** [[TMP23]] to i64* +// CHECK5-NEXT: store i64 2, i64* [[TMP24]], align 8 +// CHECK5-NEXT: [[TMP25:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK5-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i64* // CHECK5-NEXT: store i64 2, i64* [[TMP26]], align 8 -// CHECK5-NEXT: [[TMP27:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK5-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i64* -// CHECK5-NEXT: store i64 2, i64* [[TMP28]], align 8 -// CHECK5-NEXT: [[TMP29:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK5-NEXT: store i64 8, i64* [[TMP29]], align 8 -// CHECK5-NEXT: [[TMP30:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK5-NEXT: store i8* null, i8** [[TMP30]], align 8 -// CHECK5-NEXT: [[TMP31:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK5-NEXT: [[TMP32:%.*]] = bitcast i8** [[TMP31]] to i64* -// CHECK5-NEXT: store i64 [[TMP2]], i64* [[TMP32]], align 8 -// CHECK5-NEXT: [[TMP33:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK5-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i64* -// CHECK5-NEXT: store i64 [[TMP2]], i64* [[TMP34]], align 8 -// CHECK5-NEXT: [[TMP35:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK5-NEXT: store i64 8, i64* [[TMP35]], align 8 -// CHECK5-NEXT: [[TMP36:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 -// CHECK5-NEXT: store i8* null, i8** [[TMP36]], align 8 -// CHECK5-NEXT: [[TMP37:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK5-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i16** -// CHECK5-NEXT: store i16* [[VLA]], i16** [[TMP38]], align 8 -// CHECK5-NEXT: [[TMP39:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK5-NEXT: [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i16** -// CHECK5-NEXT: store i16* [[VLA]], i16** [[TMP40]], align 8 -// CHECK5-NEXT: [[TMP41:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK5-NEXT: store i64 [[TMP12]], i64* [[TMP41]], align 8 -// CHECK5-NEXT: [[TMP42:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 +// CHECK5-NEXT: [[TMP27:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 
0, i64 2 +// CHECK5-NEXT: store i8* null, i8** [[TMP27]], align 8 +// CHECK5-NEXT: [[TMP28:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK5-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i64* +// CHECK5-NEXT: store i64 [[TMP2]], i64* [[TMP29]], align 8 +// CHECK5-NEXT: [[TMP30:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK5-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i64* +// CHECK5-NEXT: store i64 [[TMP2]], i64* [[TMP31]], align 8 +// CHECK5-NEXT: [[TMP32:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 +// CHECK5-NEXT: store i8* null, i8** [[TMP32]], align 8 +// CHECK5-NEXT: [[TMP33:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK5-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i16** +// CHECK5-NEXT: store i16* [[VLA]], i16** [[TMP34]], align 8 +// CHECK5-NEXT: [[TMP35:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK5-NEXT: [[TMP36:%.*]] = bitcast i8** [[TMP35]] to i16** +// CHECK5-NEXT: store i16* [[VLA]], i16** [[TMP36]], align 8 +// CHECK5-NEXT: store i64 [[TMP12]], i64* getelementptr inbounds ([6 x i64], [6 x i64]* @.offload_sizes.11, i32 0, i32 4), align 8 +// CHECK5-NEXT: [[TMP37:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 +// CHECK5-NEXT: store i8* null, i8** [[TMP37]], align 8 +// CHECK5-NEXT: [[TMP38:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 5 +// CHECK5-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i64* +// CHECK5-NEXT: store i64 [[TMP9]], i64* [[TMP39]], align 8 +// CHECK5-NEXT: [[TMP40:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 5 +// CHECK5-NEXT: [[TMP41:%.*]] = bitcast i8** [[TMP40]] to i64* +// CHECK5-NEXT: store i64 [[TMP9]], i64* [[TMP41]], align 8 +// CHECK5-NEXT: [[TMP42:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 5 // CHECK5-NEXT: store i8* null, i8** [[TMP42]], align 8 -// CHECK5-NEXT: [[TMP43:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 5 -// CHECK5-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i64* -// CHECK5-NEXT: store i64 [[TMP9]], i64* [[TMP44]], align 8 -// CHECK5-NEXT: [[TMP45:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 5 -// CHECK5-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i64* -// CHECK5-NEXT: store i64 [[TMP9]], i64* [[TMP46]], align 8 -// CHECK5-NEXT: [[TMP47:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5 -// CHECK5-NEXT: store i64 1, i64* [[TMP47]], align 8 -// CHECK5-NEXT: [[TMP48:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 5 -// CHECK5-NEXT: store i8* null, i8** [[TMP48]], align 8 -// CHECK5-NEXT: [[TMP49:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK5-NEXT: [[TMP50:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK5-NEXT: [[TMP51:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK5-NEXT: [[TMP52:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1 -// CHECK5-NEXT: [[TOBOOL5:%.*]] = trunc i8 [[TMP52]] to i1 -// CHECK5-NEXT: [[TMP53:%.*]] = select i1 [[TOBOOL5]], i32 0, i32 1 -// CHECK5-NEXT: [[TMP54:%.*]] = call 
i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l214.region_id, i32 6, i8** [[TMP49]], i8** [[TMP50]], i64* [[TMP51]], i64* getelementptr inbounds ([6 x i64], [6 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 1, i32 [[TMP53]]) -// CHECK5-NEXT: [[TMP55:%.*]] = icmp ne i32 [[TMP54]], 0 -// CHECK5-NEXT: br i1 [[TMP55]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK5-NEXT: [[TMP43:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK5-NEXT: [[TMP44:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK5-NEXT: [[TMP45:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1 +// CHECK5-NEXT: [[TOBOOL5:%.*]] = trunc i8 [[TMP45]] to i1 +// CHECK5-NEXT: [[TMP46:%.*]] = select i1 [[TOBOOL5]], i32 0, i32 1 +// CHECK5-NEXT: [[TMP47:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l214.region_id, i32 6, i8** [[TMP43]], i8** [[TMP44]], i64* getelementptr inbounds ([6 x i64], [6 x i64]* @.offload_sizes.11, i32 0, i32 0), i64* getelementptr inbounds ([6 x i64], [6 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 1, i32 [[TMP46]]) +// CHECK5-NEXT: [[TMP48:%.*]] = icmp ne i32 [[TMP47]], 0 +// CHECK5-NEXT: br i1 [[TMP48]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK5: omp_offload.failed: // CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l214(%struct.S1* [[THIS1]], i64 [[TMP7]], i64 2, i64 [[TMP2]], i16* [[VLA]], i64 [[TMP9]]) #[[ATTR4]] // CHECK5-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -7657,15 +7500,15 @@ // CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l214(%struct.S1* [[THIS1]], i64 [[TMP7]], i64 2, i64 [[TMP2]], i16* [[VLA]], i64 [[TMP9]]) #[[ATTR4]] // CHECK5-NEXT: br label [[OMP_IF_END]] // CHECK5: omp_if.end: -// CHECK5-NEXT: [[TMP56:%.*]] = mul nsw i64 1, [[TMP2]] -// CHECK5-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP56]] +// CHECK5-NEXT: [[TMP49:%.*]] = mul nsw i64 1, [[TMP2]] +// CHECK5-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP49]] // CHECK5-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1 -// CHECK5-NEXT: [[TMP57:%.*]] = load i16, i16* [[ARRAYIDX6]], align 2 -// CHECK5-NEXT: [[CONV7:%.*]] = sext i16 [[TMP57]] to i32 -// CHECK5-NEXT: [[TMP58:%.*]] = load i32, i32* [[B]], align 4 -// CHECK5-NEXT: [[ADD8:%.*]] = add nsw i32 [[CONV7]], [[TMP58]] -// CHECK5-NEXT: [[TMP59:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK5-NEXT: call void @llvm.stackrestore(i8* [[TMP59]]) +// CHECK5-NEXT: [[TMP50:%.*]] = load i16, i16* [[ARRAYIDX6]], align 2 +// CHECK5-NEXT: [[CONV7:%.*]] = sext i16 [[TMP50]] to i32 +// CHECK5-NEXT: [[TMP51:%.*]] = load i32, i32* [[B]], align 4 +// CHECK5-NEXT: [[ADD8:%.*]] = add nsw i32 [[CONV7]], [[TMP51]] +// CHECK5-NEXT: [[TMP52:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK5-NEXT: call void @llvm.stackrestore(i8* [[TMP52]]) // CHECK5-NEXT: ret i32 [[ADD8]] // // @@ -7737,7 +7580,7 @@ // CHECK5-NEXT: store i8* null, i8** [[TMP26]], align 8 // CHECK5-NEXT: [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK5-NEXT: [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, 
i32 0 -// CHECK5-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) +// CHECK5-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.14, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.15, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) // CHECK5-NEXT: [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0 // CHECK5-NEXT: br i1 [[TMP30]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK5: omp_offload.failed: @@ -7806,7 +7649,7 @@ // CHECK5-NEXT: store i8* null, i8** [[TMP19]], align 8 // CHECK5-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK5-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK5-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.15, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) +// CHECK5-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.17, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.18, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) // CHECK5-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0 // CHECK5-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK5: omp_offload.failed: @@ -7862,20 +7705,20 @@ // CHECK5-NEXT: [[TOBOOL6:%.*]] = trunc i8 [[TMP9]] to i1 // CHECK5-NEXT: br i1 [[TOBOOL6]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK5: omp_if.then: -// CHECK5-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*, i64)* @.omp_outlined..9 to void (i32*, i32*, ...)*), %struct.S1* [[TMP1]], i64 [[TMP6]], i64 [[TMP2]], i64 [[TMP3]], i16* [[TMP4]], i64 [[TMP8]]) +// CHECK5-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*, i64)* @.omp_outlined..10 to void (i32*, i32*, ...)*), %struct.S1* [[TMP1]], i64 [[TMP6]], i64 [[TMP2]], i64 [[TMP3]], i16* [[TMP4]], i64 [[TMP8]]) // CHECK5-NEXT: br label [[OMP_IF_END:%.*]] // CHECK5: omp_if.else: // CHECK5-NEXT: call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]]) // CHECK5-NEXT: store i32 [[TMP0]], i32* [[DOTTHREADID_TEMP_]], align 4 // CHECK5-NEXT: store i32 0, i32* [[DOTBOUND_ZERO_ADDR]], align 4 -// CHECK5-NEXT: call void @.omp_outlined..9(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTBOUND_ZERO_ADDR]], %struct.S1* [[TMP1]], i64 [[TMP6]], i64 [[TMP2]], i64 [[TMP3]], i16* [[TMP4]], i64 [[TMP8]]) #[[ATTR4]] +// CHECK5-NEXT: call void @.omp_outlined..10(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTBOUND_ZERO_ADDR]], %struct.S1* [[TMP1]], i64 [[TMP6]], i64 [[TMP2]], i64 [[TMP3]], i16* [[TMP4]], i64 [[TMP8]]) #[[ATTR4]] // CHECK5-NEXT: call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]]) // CHECK5-NEXT: br label [[OMP_IF_END]] // CHECK5: omp_if.end: // CHECK5-NEXT: ret void // // -// CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..9 +// CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..10 // CHECK5-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i64 noundef [[B:%.*]], i64 noundef [[VLA:%.*]], i64 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] { // CHECK5-NEXT: entry: // CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -8064,11 +7907,11 @@ // CHECK5-NEXT: [[CONV5:%.*]] = bitcast i64* [[AAA_CASTED]] to i8* // CHECK5-NEXT: store i8 [[TMP5]], i8* [[CONV5]], align 1 // CHECK5-NEXT: [[TMP6:%.*]] = load i64, i64* [[AAA_CASTED]], align 8 -// CHECK5-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, [10 x i32]*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], [10 x i32]* [[TMP0]]) +// CHECK5-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, [10 x i32]*)* @.omp_outlined..13 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], [10 x i32]* [[TMP0]]) // CHECK5-NEXT: ret void // // -// CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..13 // CHECK5-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], i64 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] { // CHECK5-NEXT: entry: // CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -8114,11 +7957,11 @@ // CHECK5-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16* // CHECK5-NEXT: store i16 [[TMP3]], i16* [[CONV3]], align 2 // CHECK5-NEXT: [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8 -// CHECK5-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]]) +// CHECK5-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]]) // CHECK5-NEXT: ret void // // -// CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..14 +// CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..16 // CHECK5-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] { // CHECK5-NEXT: entry: // CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -8253,7 +8096,6 @@ // CHECK6-NEXT: [[DOTOFFLOAD_BASEPTRS20:%.*]] = alloca [10 x i8*], align 8 // CHECK6-NEXT: [[DOTOFFLOAD_PTRS21:%.*]] = alloca [10 x i8*], align 8 // CHECK6-NEXT: [[DOTOFFLOAD_MAPPERS22:%.*]] = alloca [10 x i8*], align 8 -// CHECK6-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [10 x i64], align 8 // CHECK6-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]]) // CHECK6-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 // CHECK6-NEXT: store i32 0, i32* [[A]], align 4 @@ -8394,106 +8236,87 @@ // CHECK6-NEXT: [[TMP70:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0 // CHECK6-NEXT: [[TMP71:%.*]] = bitcast i8** [[TMP70]] to i64* // CHECK6-NEXT: store i64 [[TMP61]], i64* [[TMP71]], align 8 -// CHECK6-NEXT: [[TMP72:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK6-NEXT: store i64 4, i64* [[TMP72]], align 8 -// CHECK6-NEXT: [[TMP73:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 0 -// CHECK6-NEXT: store i8* null, i8** [[TMP73]], align 8 -// CHECK6-NEXT: [[TMP74:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 1 -// CHECK6-NEXT: [[TMP75:%.*]] = bitcast i8** [[TMP74]] to [10 x float]** -// CHECK6-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP75]], align 8 -// CHECK6-NEXT: [[TMP76:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 1 -// CHECK6-NEXT: [[TMP77:%.*]] = bitcast i8** [[TMP76]] to [10 x float]** -// CHECK6-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP77]], align 8 -// CHECK6-NEXT: [[TMP78:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK6-NEXT: store i64 40, i64* [[TMP78]], align 8 -// CHECK6-NEXT: [[TMP79:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 1 -// CHECK6-NEXT: store i8* null, i8** [[TMP79]], align 8 -// CHECK6-NEXT: [[TMP80:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 2 +// CHECK6-NEXT: [[TMP72:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 0 +// CHECK6-NEXT: store i8* null, i8** [[TMP72]], align 8 +// CHECK6-NEXT: [[TMP73:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 1 +// CHECK6-NEXT: [[TMP74:%.*]] = bitcast i8** [[TMP73]] to [10 x float]** +// CHECK6-NEXT: store [10 x float]* [[B]], [10 x 
float]** [[TMP74]], align 8 +// CHECK6-NEXT: [[TMP75:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 1 +// CHECK6-NEXT: [[TMP76:%.*]] = bitcast i8** [[TMP75]] to [10 x float]** +// CHECK6-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP76]], align 8 +// CHECK6-NEXT: [[TMP77:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 1 +// CHECK6-NEXT: store i8* null, i8** [[TMP77]], align 8 +// CHECK6-NEXT: [[TMP78:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 2 +// CHECK6-NEXT: [[TMP79:%.*]] = bitcast i8** [[TMP78]] to i64* +// CHECK6-NEXT: store i64 [[TMP2]], i64* [[TMP79]], align 8 +// CHECK6-NEXT: [[TMP80:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 2 // CHECK6-NEXT: [[TMP81:%.*]] = bitcast i8** [[TMP80]] to i64* // CHECK6-NEXT: store i64 [[TMP2]], i64* [[TMP81]], align 8 -// CHECK6-NEXT: [[TMP82:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 2 -// CHECK6-NEXT: [[TMP83:%.*]] = bitcast i8** [[TMP82]] to i64* -// CHECK6-NEXT: store i64 [[TMP2]], i64* [[TMP83]], align 8 -// CHECK6-NEXT: [[TMP84:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK6-NEXT: store i64 8, i64* [[TMP84]], align 8 -// CHECK6-NEXT: [[TMP85:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 2 -// CHECK6-NEXT: store i8* null, i8** [[TMP85]], align 8 -// CHECK6-NEXT: [[TMP86:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 3 -// CHECK6-NEXT: [[TMP87:%.*]] = bitcast i8** [[TMP86]] to float** -// CHECK6-NEXT: store float* [[VLA]], float** [[TMP87]], align 8 -// CHECK6-NEXT: [[TMP88:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 3 -// CHECK6-NEXT: [[TMP89:%.*]] = bitcast i8** [[TMP88]] to float** -// CHECK6-NEXT: store float* [[VLA]], float** [[TMP89]], align 8 -// CHECK6-NEXT: [[TMP90:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK6-NEXT: store i64 [[TMP65]], i64* [[TMP90]], align 8 -// CHECK6-NEXT: [[TMP91:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 3 -// CHECK6-NEXT: store i8* null, i8** [[TMP91]], align 8 -// CHECK6-NEXT: [[TMP92:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 4 -// CHECK6-NEXT: [[TMP93:%.*]] = bitcast i8** [[TMP92]] to [5 x [10 x double]]** -// CHECK6-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP93]], align 8 -// CHECK6-NEXT: [[TMP94:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 4 -// CHECK6-NEXT: [[TMP95:%.*]] = bitcast i8** [[TMP94]] to [5 x [10 x double]]** -// CHECK6-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP95]], align 8 -// CHECK6-NEXT: [[TMP96:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK6-NEXT: store i64 400, i64* [[TMP96]], align 8 -// CHECK6-NEXT: [[TMP97:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 4 +// CHECK6-NEXT: [[TMP82:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 2 +// CHECK6-NEXT: store i8* null, i8** [[TMP82]], align 8 +// CHECK6-NEXT: [[TMP83:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], 
i32 0, i32 3 +// CHECK6-NEXT: [[TMP84:%.*]] = bitcast i8** [[TMP83]] to float** +// CHECK6-NEXT: store float* [[VLA]], float** [[TMP84]], align 8 +// CHECK6-NEXT: [[TMP85:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 3 +// CHECK6-NEXT: [[TMP86:%.*]] = bitcast i8** [[TMP85]] to float** +// CHECK6-NEXT: store float* [[VLA]], float** [[TMP86]], align 8 +// CHECK6-NEXT: store i64 [[TMP65]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_sizes.8, i32 0, i32 3), align 8 +// CHECK6-NEXT: [[TMP87:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 3 +// CHECK6-NEXT: store i8* null, i8** [[TMP87]], align 8 +// CHECK6-NEXT: [[TMP88:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 4 +// CHECK6-NEXT: [[TMP89:%.*]] = bitcast i8** [[TMP88]] to [5 x [10 x double]]** +// CHECK6-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP89]], align 8 +// CHECK6-NEXT: [[TMP90:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 4 +// CHECK6-NEXT: [[TMP91:%.*]] = bitcast i8** [[TMP90]] to [5 x [10 x double]]** +// CHECK6-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP91]], align 8 +// CHECK6-NEXT: [[TMP92:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 4 +// CHECK6-NEXT: store i8* null, i8** [[TMP92]], align 8 +// CHECK6-NEXT: [[TMP93:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 5 +// CHECK6-NEXT: [[TMP94:%.*]] = bitcast i8** [[TMP93]] to i64* +// CHECK6-NEXT: store i64 5, i64* [[TMP94]], align 8 +// CHECK6-NEXT: [[TMP95:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 5 +// CHECK6-NEXT: [[TMP96:%.*]] = bitcast i8** [[TMP95]] to i64* +// CHECK6-NEXT: store i64 5, i64* [[TMP96]], align 8 +// CHECK6-NEXT: [[TMP97:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 5 // CHECK6-NEXT: store i8* null, i8** [[TMP97]], align 8 -// CHECK6-NEXT: [[TMP98:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 5 +// CHECK6-NEXT: [[TMP98:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 6 // CHECK6-NEXT: [[TMP99:%.*]] = bitcast i8** [[TMP98]] to i64* -// CHECK6-NEXT: store i64 5, i64* [[TMP99]], align 8 -// CHECK6-NEXT: [[TMP100:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 5 +// CHECK6-NEXT: store i64 [[TMP5]], i64* [[TMP99]], align 8 +// CHECK6-NEXT: [[TMP100:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 6 // CHECK6-NEXT: [[TMP101:%.*]] = bitcast i8** [[TMP100]] to i64* -// CHECK6-NEXT: store i64 5, i64* [[TMP101]], align 8 -// CHECK6-NEXT: [[TMP102:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5 -// CHECK6-NEXT: store i64 8, i64* [[TMP102]], align 8 -// CHECK6-NEXT: [[TMP103:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 5 -// CHECK6-NEXT: store i8* null, i8** [[TMP103]], align 8 -// CHECK6-NEXT: [[TMP104:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 6 -// CHECK6-NEXT: [[TMP105:%.*]] = bitcast i8** [[TMP104]] to i64* -// CHECK6-NEXT: store i64 [[TMP5]], i64* [[TMP105]], align 8 -// CHECK6-NEXT: [[TMP106:%.*]] = getelementptr inbounds [10 x i8*], [10 x 
i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 6 -// CHECK6-NEXT: [[TMP107:%.*]] = bitcast i8** [[TMP106]] to i64* -// CHECK6-NEXT: store i64 [[TMP5]], i64* [[TMP107]], align 8 -// CHECK6-NEXT: [[TMP108:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6 -// CHECK6-NEXT: store i64 8, i64* [[TMP108]], align 8 -// CHECK6-NEXT: [[TMP109:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 6 -// CHECK6-NEXT: store i8* null, i8** [[TMP109]], align 8 -// CHECK6-NEXT: [[TMP110:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 7 -// CHECK6-NEXT: [[TMP111:%.*]] = bitcast i8** [[TMP110]] to double** -// CHECK6-NEXT: store double* [[VLA1]], double** [[TMP111]], align 8 -// CHECK6-NEXT: [[TMP112:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 7 -// CHECK6-NEXT: [[TMP113:%.*]] = bitcast i8** [[TMP112]] to double** -// CHECK6-NEXT: store double* [[VLA1]], double** [[TMP113]], align 8 -// CHECK6-NEXT: [[TMP114:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7 -// CHECK6-NEXT: store i64 [[TMP67]], i64* [[TMP114]], align 8 -// CHECK6-NEXT: [[TMP115:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 7 -// CHECK6-NEXT: store i8* null, i8** [[TMP115]], align 8 -// CHECK6-NEXT: [[TMP116:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 8 -// CHECK6-NEXT: [[TMP117:%.*]] = bitcast i8** [[TMP116]] to %struct.TT** -// CHECK6-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP117]], align 8 -// CHECK6-NEXT: [[TMP118:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 8 -// CHECK6-NEXT: [[TMP119:%.*]] = bitcast i8** [[TMP118]] to %struct.TT** -// CHECK6-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP119]], align 8 -// CHECK6-NEXT: [[TMP120:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 8 -// CHECK6-NEXT: store i64 16, i64* [[TMP120]], align 8 -// CHECK6-NEXT: [[TMP121:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 8 -// CHECK6-NEXT: store i8* null, i8** [[TMP121]], align 8 -// CHECK6-NEXT: [[TMP122:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 9 -// CHECK6-NEXT: [[TMP123:%.*]] = bitcast i8** [[TMP122]] to i64* -// CHECK6-NEXT: store i64 [[TMP63]], i64* [[TMP123]], align 8 -// CHECK6-NEXT: [[TMP124:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 9 -// CHECK6-NEXT: [[TMP125:%.*]] = bitcast i8** [[TMP124]] to i64* -// CHECK6-NEXT: store i64 [[TMP63]], i64* [[TMP125]], align 8 -// CHECK6-NEXT: [[TMP126:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 9 -// CHECK6-NEXT: store i64 4, i64* [[TMP126]], align 8 -// CHECK6-NEXT: [[TMP127:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 9 -// CHECK6-NEXT: store i8* null, i8** [[TMP127]], align 8 -// CHECK6-NEXT: [[TMP128:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0 -// CHECK6-NEXT: [[TMP129:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0 -// CHECK6-NEXT: [[TMP130:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK6-NEXT: [[TMP131:%.*]] = call i32 
@__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140.region_id, i32 10, i8** [[TMP128]], i8** [[TMP129]], i64* [[TMP130]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_maptypes.8, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) -// CHECK6-NEXT: [[TMP132:%.*]] = icmp ne i32 [[TMP131]], 0 -// CHECK6-NEXT: br i1 [[TMP132]], label [[OMP_OFFLOAD_FAILED23:%.*]], label [[OMP_OFFLOAD_CONT24:%.*]] +// CHECK6-NEXT: store i64 [[TMP5]], i64* [[TMP101]], align 8 +// CHECK6-NEXT: [[TMP102:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 6 +// CHECK6-NEXT: store i8* null, i8** [[TMP102]], align 8 +// CHECK6-NEXT: [[TMP103:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 7 +// CHECK6-NEXT: [[TMP104:%.*]] = bitcast i8** [[TMP103]] to double** +// CHECK6-NEXT: store double* [[VLA1]], double** [[TMP104]], align 8 +// CHECK6-NEXT: [[TMP105:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 7 +// CHECK6-NEXT: [[TMP106:%.*]] = bitcast i8** [[TMP105]] to double** +// CHECK6-NEXT: store double* [[VLA1]], double** [[TMP106]], align 8 +// CHECK6-NEXT: store i64 [[TMP67]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_sizes.8, i32 0, i32 7), align 8 +// CHECK6-NEXT: [[TMP107:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 7 +// CHECK6-NEXT: store i8* null, i8** [[TMP107]], align 8 +// CHECK6-NEXT: [[TMP108:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 8 +// CHECK6-NEXT: [[TMP109:%.*]] = bitcast i8** [[TMP108]] to %struct.TT** +// CHECK6-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP109]], align 8 +// CHECK6-NEXT: [[TMP110:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 8 +// CHECK6-NEXT: [[TMP111:%.*]] = bitcast i8** [[TMP110]] to %struct.TT** +// CHECK6-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP111]], align 8 +// CHECK6-NEXT: [[TMP112:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 8 +// CHECK6-NEXT: store i8* null, i8** [[TMP112]], align 8 +// CHECK6-NEXT: [[TMP113:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 9 +// CHECK6-NEXT: [[TMP114:%.*]] = bitcast i8** [[TMP113]] to i64* +// CHECK6-NEXT: store i64 [[TMP63]], i64* [[TMP114]], align 8 +// CHECK6-NEXT: [[TMP115:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 9 +// CHECK6-NEXT: [[TMP116:%.*]] = bitcast i8** [[TMP115]] to i64* +// CHECK6-NEXT: store i64 [[TMP63]], i64* [[TMP116]], align 8 +// CHECK6-NEXT: [[TMP117:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 9 +// CHECK6-NEXT: store i8* null, i8** [[TMP117]], align 8 +// CHECK6-NEXT: [[TMP118:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0 +// CHECK6-NEXT: [[TMP119:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0 +// CHECK6-NEXT: [[TMP120:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140.region_id, i32 10, i8** [[TMP118]], i8** [[TMP119]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([10 x i64], [10 x i64]* 
@.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) +// CHECK6-NEXT: [[TMP121:%.*]] = icmp ne i32 [[TMP120]], 0 +// CHECK6-NEXT: br i1 [[TMP121]], label [[OMP_OFFLOAD_FAILED23:%.*]], label [[OMP_OFFLOAD_CONT24:%.*]] // CHECK6: omp_offload.failed23: // CHECK6-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140(i64 [[TMP61]], [10 x float]* [[B]], i64 [[TMP2]], float* [[VLA]], [5 x [10 x double]]* [[C]], i64 5, i64 [[TMP5]], double* [[VLA1]], %struct.TT* [[D]], i64 [[TMP63]]) #[[ATTR4]] // CHECK6-NEXT: br label [[OMP_OFFLOAD_CONT24]] @@ -8503,10 +8326,10 @@ // CHECK6-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140(i64 [[TMP61]], [10 x float]* [[B]], i64 [[TMP2]], float* [[VLA]], [5 x [10 x double]]* [[C]], i64 5, i64 [[TMP5]], double* [[VLA1]], %struct.TT* [[D]], i64 [[TMP63]]) #[[ATTR4]] // CHECK6-NEXT: br label [[OMP_IF_END26]] // CHECK6: omp_if.end26: -// CHECK6-NEXT: [[TMP133:%.*]] = load i32, i32* [[A]], align 4 -// CHECK6-NEXT: [[TMP134:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK6-NEXT: call void @llvm.stackrestore(i8* [[TMP134]]) -// CHECK6-NEXT: ret i32 [[TMP133]] +// CHECK6-NEXT: [[TMP122:%.*]] = load i32, i32* [[A]], align 4 +// CHECK6-NEXT: [[TMP123:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK6-NEXT: call void @llvm.stackrestore(i8* [[TMP123]]) +// CHECK6-NEXT: ret i32 [[TMP122]] // // // CHECK6-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96 @@ -9236,7 +9059,6 @@ // CHECK6-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [6 x i8*], align 8 // CHECK6-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [6 x i8*], align 8 // CHECK6-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [6 x i8*], align 8 -// CHECK6-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [6 x i64], align 8 // CHECK6-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 // CHECK6-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 // CHECK6-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8 @@ -9277,69 +9099,57 @@ // CHECK6-NEXT: [[TMP15:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK6-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to double** // CHECK6-NEXT: store double* [[A]], double** [[TMP16]], align 8 -// CHECK6-NEXT: [[TMP17:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK6-NEXT: store i64 8, i64* [[TMP17]], align 8 -// CHECK6-NEXT: [[TMP18:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK6-NEXT: store i8* null, i8** [[TMP18]], align 8 -// CHECK6-NEXT: [[TMP19:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK6-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i64* -// CHECK6-NEXT: store i64 [[TMP7]], i64* [[TMP20]], align 8 -// CHECK6-NEXT: [[TMP21:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK6-NEXT: [[TMP22:%.*]] = bitcast i8** [[TMP21]] to i64* -// CHECK6-NEXT: store i64 [[TMP7]], i64* [[TMP22]], align 8 -// CHECK6-NEXT: [[TMP23:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK6-NEXT: store i64 4, i64* [[TMP23]], align 8 -// CHECK6-NEXT: [[TMP24:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK6-NEXT: store i8* null, i8** [[TMP24]], align 8 -// CHECK6-NEXT: [[TMP25:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, 
i32 2 +// CHECK6-NEXT: [[TMP17:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK6-NEXT: store i8* null, i8** [[TMP17]], align 8 +// CHECK6-NEXT: [[TMP18:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK6-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i64* +// CHECK6-NEXT: store i64 [[TMP7]], i64* [[TMP19]], align 8 +// CHECK6-NEXT: [[TMP20:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK6-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i64* +// CHECK6-NEXT: store i64 [[TMP7]], i64* [[TMP21]], align 8 +// CHECK6-NEXT: [[TMP22:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK6-NEXT: store i8* null, i8** [[TMP22]], align 8 +// CHECK6-NEXT: [[TMP23:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK6-NEXT: [[TMP24:%.*]] = bitcast i8** [[TMP23]] to i64* +// CHECK6-NEXT: store i64 2, i64* [[TMP24]], align 8 +// CHECK6-NEXT: [[TMP25:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK6-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i64* // CHECK6-NEXT: store i64 2, i64* [[TMP26]], align 8 -// CHECK6-NEXT: [[TMP27:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK6-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i64* -// CHECK6-NEXT: store i64 2, i64* [[TMP28]], align 8 -// CHECK6-NEXT: [[TMP29:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK6-NEXT: store i64 8, i64* [[TMP29]], align 8 -// CHECK6-NEXT: [[TMP30:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK6-NEXT: store i8* null, i8** [[TMP30]], align 8 -// CHECK6-NEXT: [[TMP31:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK6-NEXT: [[TMP32:%.*]] = bitcast i8** [[TMP31]] to i64* -// CHECK6-NEXT: store i64 [[TMP2]], i64* [[TMP32]], align 8 -// CHECK6-NEXT: [[TMP33:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK6-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i64* -// CHECK6-NEXT: store i64 [[TMP2]], i64* [[TMP34]], align 8 -// CHECK6-NEXT: [[TMP35:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK6-NEXT: store i64 8, i64* [[TMP35]], align 8 -// CHECK6-NEXT: [[TMP36:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 -// CHECK6-NEXT: store i8* null, i8** [[TMP36]], align 8 -// CHECK6-NEXT: [[TMP37:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK6-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i16** -// CHECK6-NEXT: store i16* [[VLA]], i16** [[TMP38]], align 8 -// CHECK6-NEXT: [[TMP39:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK6-NEXT: [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i16** -// CHECK6-NEXT: store i16* [[VLA]], i16** [[TMP40]], align 8 -// CHECK6-NEXT: [[TMP41:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK6-NEXT: store i64 [[TMP12]], i64* [[TMP41]], align 8 -// CHECK6-NEXT: [[TMP42:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 +// CHECK6-NEXT: [[TMP27:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 
0, i64 2 +// CHECK6-NEXT: store i8* null, i8** [[TMP27]], align 8 +// CHECK6-NEXT: [[TMP28:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK6-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i64* +// CHECK6-NEXT: store i64 [[TMP2]], i64* [[TMP29]], align 8 +// CHECK6-NEXT: [[TMP30:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK6-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i64* +// CHECK6-NEXT: store i64 [[TMP2]], i64* [[TMP31]], align 8 +// CHECK6-NEXT: [[TMP32:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 +// CHECK6-NEXT: store i8* null, i8** [[TMP32]], align 8 +// CHECK6-NEXT: [[TMP33:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK6-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i16** +// CHECK6-NEXT: store i16* [[VLA]], i16** [[TMP34]], align 8 +// CHECK6-NEXT: [[TMP35:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK6-NEXT: [[TMP36:%.*]] = bitcast i8** [[TMP35]] to i16** +// CHECK6-NEXT: store i16* [[VLA]], i16** [[TMP36]], align 8 +// CHECK6-NEXT: store i64 [[TMP12]], i64* getelementptr inbounds ([6 x i64], [6 x i64]* @.offload_sizes.11, i32 0, i32 4), align 8 +// CHECK6-NEXT: [[TMP37:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 +// CHECK6-NEXT: store i8* null, i8** [[TMP37]], align 8 +// CHECK6-NEXT: [[TMP38:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 5 +// CHECK6-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i64* +// CHECK6-NEXT: store i64 [[TMP9]], i64* [[TMP39]], align 8 +// CHECK6-NEXT: [[TMP40:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 5 +// CHECK6-NEXT: [[TMP41:%.*]] = bitcast i8** [[TMP40]] to i64* +// CHECK6-NEXT: store i64 [[TMP9]], i64* [[TMP41]], align 8 +// CHECK6-NEXT: [[TMP42:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 5 // CHECK6-NEXT: store i8* null, i8** [[TMP42]], align 8 -// CHECK6-NEXT: [[TMP43:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 5 -// CHECK6-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i64* -// CHECK6-NEXT: store i64 [[TMP9]], i64* [[TMP44]], align 8 -// CHECK6-NEXT: [[TMP45:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 5 -// CHECK6-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i64* -// CHECK6-NEXT: store i64 [[TMP9]], i64* [[TMP46]], align 8 -// CHECK6-NEXT: [[TMP47:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5 -// CHECK6-NEXT: store i64 1, i64* [[TMP47]], align 8 -// CHECK6-NEXT: [[TMP48:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 5 -// CHECK6-NEXT: store i8* null, i8** [[TMP48]], align 8 -// CHECK6-NEXT: [[TMP49:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK6-NEXT: [[TMP50:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK6-NEXT: [[TMP51:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK6-NEXT: [[TMP52:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1 -// CHECK6-NEXT: [[TOBOOL5:%.*]] = trunc i8 [[TMP52]] to i1 -// CHECK6-NEXT: [[TMP53:%.*]] = select i1 [[TOBOOL5]], i32 0, i32 1 -// CHECK6-NEXT: [[TMP54:%.*]] = call 
i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l214.region_id, i32 6, i8** [[TMP49]], i8** [[TMP50]], i64* [[TMP51]], i64* getelementptr inbounds ([6 x i64], [6 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 1, i32 [[TMP53]]) -// CHECK6-NEXT: [[TMP55:%.*]] = icmp ne i32 [[TMP54]], 0 -// CHECK6-NEXT: br i1 [[TMP55]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK6-NEXT: [[TMP43:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK6-NEXT: [[TMP44:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK6-NEXT: [[TMP45:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1 +// CHECK6-NEXT: [[TOBOOL5:%.*]] = trunc i8 [[TMP45]] to i1 +// CHECK6-NEXT: [[TMP46:%.*]] = select i1 [[TOBOOL5]], i32 0, i32 1 +// CHECK6-NEXT: [[TMP47:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l214.region_id, i32 6, i8** [[TMP43]], i8** [[TMP44]], i64* getelementptr inbounds ([6 x i64], [6 x i64]* @.offload_sizes.11, i32 0, i32 0), i64* getelementptr inbounds ([6 x i64], [6 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 1, i32 [[TMP46]]) +// CHECK6-NEXT: [[TMP48:%.*]] = icmp ne i32 [[TMP47]], 0 +// CHECK6-NEXT: br i1 [[TMP48]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK6: omp_offload.failed: // CHECK6-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l214(%struct.S1* [[THIS1]], i64 [[TMP7]], i64 2, i64 [[TMP2]], i16* [[VLA]], i64 [[TMP9]]) #[[ATTR4]] // CHECK6-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -9349,15 +9159,15 @@ // CHECK6-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l214(%struct.S1* [[THIS1]], i64 [[TMP7]], i64 2, i64 [[TMP2]], i16* [[VLA]], i64 [[TMP9]]) #[[ATTR4]] // CHECK6-NEXT: br label [[OMP_IF_END]] // CHECK6: omp_if.end: -// CHECK6-NEXT: [[TMP56:%.*]] = mul nsw i64 1, [[TMP2]] -// CHECK6-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP56]] +// CHECK6-NEXT: [[TMP49:%.*]] = mul nsw i64 1, [[TMP2]] +// CHECK6-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP49]] // CHECK6-NEXT: [[ARRAYIDX6:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1 -// CHECK6-NEXT: [[TMP57:%.*]] = load i16, i16* [[ARRAYIDX6]], align 2 -// CHECK6-NEXT: [[CONV7:%.*]] = sext i16 [[TMP57]] to i32 -// CHECK6-NEXT: [[TMP58:%.*]] = load i32, i32* [[B]], align 4 -// CHECK6-NEXT: [[ADD8:%.*]] = add nsw i32 [[CONV7]], [[TMP58]] -// CHECK6-NEXT: [[TMP59:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK6-NEXT: call void @llvm.stackrestore(i8* [[TMP59]]) +// CHECK6-NEXT: [[TMP50:%.*]] = load i16, i16* [[ARRAYIDX6]], align 2 +// CHECK6-NEXT: [[CONV7:%.*]] = sext i16 [[TMP50]] to i32 +// CHECK6-NEXT: [[TMP51:%.*]] = load i32, i32* [[B]], align 4 +// CHECK6-NEXT: [[ADD8:%.*]] = add nsw i32 [[CONV7]], [[TMP51]] +// CHECK6-NEXT: [[TMP52:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK6-NEXT: call void @llvm.stackrestore(i8* [[TMP52]]) // CHECK6-NEXT: ret i32 [[ADD8]] // // @@ -9429,7 +9239,7 @@ // CHECK6-NEXT: store i8* null, i8** [[TMP26]], align 8 // CHECK6-NEXT: [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK6-NEXT: [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, 
i32 0 -// CHECK6-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) +// CHECK6-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.14, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.15, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) // CHECK6-NEXT: [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0 // CHECK6-NEXT: br i1 [[TMP30]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK6: omp_offload.failed: @@ -9498,7 +9308,7 @@ // CHECK6-NEXT: store i8* null, i8** [[TMP19]], align 8 // CHECK6-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK6-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK6-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.15, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) +// CHECK6-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.17, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.18, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) // CHECK6-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0 // CHECK6-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK6: omp_offload.failed: @@ -9554,20 +9364,20 @@ // CHECK6-NEXT: [[TOBOOL6:%.*]] = trunc i8 [[TMP9]] to i1 // CHECK6-NEXT: br i1 [[TOBOOL6]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK6: omp_if.then: -// CHECK6-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*, i64)* @.omp_outlined..9 to void (i32*, i32*, ...)*), %struct.S1* [[TMP1]], i64 [[TMP6]], i64 [[TMP2]], i64 [[TMP3]], i16* [[TMP4]], i64 [[TMP8]]) +// CHECK6-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*, i64)* @.omp_outlined..10 to void (i32*, i32*, ...)*), %struct.S1* [[TMP1]], i64 [[TMP6]], i64 [[TMP2]], i64 [[TMP3]], i16* [[TMP4]], i64 [[TMP8]]) // CHECK6-NEXT: br label [[OMP_IF_END:%.*]] // CHECK6: omp_if.else: // CHECK6-NEXT: call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]]) // CHECK6-NEXT: store i32 [[TMP0]], i32* [[DOTTHREADID_TEMP_]], align 4 // CHECK6-NEXT: store i32 0, i32* [[DOTBOUND_ZERO_ADDR]], align 4 -// CHECK6-NEXT: call void @.omp_outlined..9(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTBOUND_ZERO_ADDR]], %struct.S1* [[TMP1]], i64 [[TMP6]], i64 [[TMP2]], i64 [[TMP3]], i16* [[TMP4]], i64 [[TMP8]]) #[[ATTR4]] +// CHECK6-NEXT: call void @.omp_outlined..10(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTBOUND_ZERO_ADDR]], %struct.S1* [[TMP1]], i64 [[TMP6]], i64 [[TMP2]], i64 [[TMP3]], i16* [[TMP4]], i64 [[TMP8]]) #[[ATTR4]] // CHECK6-NEXT: call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]]) // CHECK6-NEXT: br label [[OMP_IF_END]] // CHECK6: omp_if.end: // CHECK6-NEXT: ret void // // -// CHECK6-LABEL: define {{[^@]+}}@.omp_outlined..9 +// CHECK6-LABEL: define {{[^@]+}}@.omp_outlined..10 // CHECK6-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i64 noundef [[B:%.*]], i64 noundef [[VLA:%.*]], i64 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] { // CHECK6-NEXT: entry: // CHECK6-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -9756,11 +9566,11 @@ // CHECK6-NEXT: [[CONV5:%.*]] = bitcast i64* [[AAA_CASTED]] to i8* // CHECK6-NEXT: store i8 [[TMP5]], i8* [[CONV5]], align 1 // CHECK6-NEXT: [[TMP6:%.*]] = load i64, i64* [[AAA_CASTED]], align 8 -// CHECK6-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, [10 x i32]*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], [10 x i32]* [[TMP0]]) +// CHECK6-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, [10 x i32]*)* @.omp_outlined..13 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], [10 x i32]* [[TMP0]]) // CHECK6-NEXT: ret void // // -// CHECK6-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK6-LABEL: define {{[^@]+}}@.omp_outlined..13 // CHECK6-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], i64 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] { // CHECK6-NEXT: entry: // CHECK6-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -9806,11 +9616,11 @@ // CHECK6-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16* // CHECK6-NEXT: store i16 [[TMP3]], i16* [[CONV3]], align 2 // CHECK6-NEXT: [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8 -// CHECK6-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]]) +// CHECK6-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]]) // CHECK6-NEXT: ret void // // -// CHECK6-LABEL: define {{[^@]+}}@.omp_outlined..14 +// CHECK6-LABEL: define {{[^@]+}}@.omp_outlined..16 // CHECK6-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] { // CHECK6-NEXT: entry: // CHECK6-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -9944,7 +9754,6 @@ // CHECK7-NEXT: [[DOTOFFLOAD_BASEPTRS14:%.*]] = alloca [10 x i8*], align 4 // CHECK7-NEXT: [[DOTOFFLOAD_PTRS15:%.*]] = alloca [10 x i8*], align 4 // CHECK7-NEXT: [[DOTOFFLOAD_MAPPERS16:%.*]] = alloca [10 x i8*], align 4 -// CHECK7-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [10 x i64], align 4 // CHECK7-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]]) // CHECK7-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 // CHECK7-NEXT: store i32 0, i32* [[A]], align 4 @@ -10076,106 +9885,87 @@ // CHECK7-NEXT: [[TMP68:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 0 // CHECK7-NEXT: [[TMP69:%.*]] = bitcast i8** [[TMP68]] to i32* // CHECK7-NEXT: store i32 [[TMP57]], i32* [[TMP69]], align 4 -// CHECK7-NEXT: [[TMP70:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK7-NEXT: store i64 4, i64* [[TMP70]], align 4 -// CHECK7-NEXT: [[TMP71:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 0 -// CHECK7-NEXT: store i8* null, i8** [[TMP71]], align 4 -// CHECK7-NEXT: [[TMP72:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 1 -// CHECK7-NEXT: [[TMP73:%.*]] = bitcast i8** [[TMP72]] to [10 x float]** -// CHECK7-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP73]], align 4 -// CHECK7-NEXT: [[TMP74:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 1 -// CHECK7-NEXT: [[TMP75:%.*]] = bitcast i8** [[TMP74]] to [10 x float]** -// CHECK7-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP75]], align 4 -// CHECK7-NEXT: [[TMP76:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK7-NEXT: store i64 40, i64* [[TMP76]], align 4 -// CHECK7-NEXT: [[TMP77:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 1 -// CHECK7-NEXT: store i8* null, i8** [[TMP77]], align 4 -// CHECK7-NEXT: [[TMP78:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 2 +// CHECK7-NEXT: [[TMP70:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 0 +// CHECK7-NEXT: store i8* null, i8** [[TMP70]], align 4 +// CHECK7-NEXT: [[TMP71:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 1 +// CHECK7-NEXT: [[TMP72:%.*]] = bitcast i8** [[TMP71]] to [10 x float]** +// CHECK7-NEXT: store [10 x float]* [[B]], [10 x 
float]** [[TMP72]], align 4 +// CHECK7-NEXT: [[TMP73:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 1 +// CHECK7-NEXT: [[TMP74:%.*]] = bitcast i8** [[TMP73]] to [10 x float]** +// CHECK7-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP74]], align 4 +// CHECK7-NEXT: [[TMP75:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 1 +// CHECK7-NEXT: store i8* null, i8** [[TMP75]], align 4 +// CHECK7-NEXT: [[TMP76:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 2 +// CHECK7-NEXT: [[TMP77:%.*]] = bitcast i8** [[TMP76]] to i32* +// CHECK7-NEXT: store i32 [[TMP1]], i32* [[TMP77]], align 4 +// CHECK7-NEXT: [[TMP78:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 2 // CHECK7-NEXT: [[TMP79:%.*]] = bitcast i8** [[TMP78]] to i32* // CHECK7-NEXT: store i32 [[TMP1]], i32* [[TMP79]], align 4 -// CHECK7-NEXT: [[TMP80:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 2 -// CHECK7-NEXT: [[TMP81:%.*]] = bitcast i8** [[TMP80]] to i32* -// CHECK7-NEXT: store i32 [[TMP1]], i32* [[TMP81]], align 4 -// CHECK7-NEXT: [[TMP82:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK7-NEXT: store i64 4, i64* [[TMP82]], align 4 -// CHECK7-NEXT: [[TMP83:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 2 -// CHECK7-NEXT: store i8* null, i8** [[TMP83]], align 4 -// CHECK7-NEXT: [[TMP84:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 3 -// CHECK7-NEXT: [[TMP85:%.*]] = bitcast i8** [[TMP84]] to float** -// CHECK7-NEXT: store float* [[VLA]], float** [[TMP85]], align 4 -// CHECK7-NEXT: [[TMP86:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 3 -// CHECK7-NEXT: [[TMP87:%.*]] = bitcast i8** [[TMP86]] to float** -// CHECK7-NEXT: store float* [[VLA]], float** [[TMP87]], align 4 -// CHECK7-NEXT: [[TMP88:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK7-NEXT: store i64 [[TMP62]], i64* [[TMP88]], align 4 -// CHECK7-NEXT: [[TMP89:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 3 -// CHECK7-NEXT: store i8* null, i8** [[TMP89]], align 4 -// CHECK7-NEXT: [[TMP90:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 4 -// CHECK7-NEXT: [[TMP91:%.*]] = bitcast i8** [[TMP90]] to [5 x [10 x double]]** -// CHECK7-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP91]], align 4 -// CHECK7-NEXT: [[TMP92:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 4 -// CHECK7-NEXT: [[TMP93:%.*]] = bitcast i8** [[TMP92]] to [5 x [10 x double]]** -// CHECK7-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP93]], align 4 -// CHECK7-NEXT: [[TMP94:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK7-NEXT: store i64 400, i64* [[TMP94]], align 4 -// CHECK7-NEXT: [[TMP95:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 4 +// CHECK7-NEXT: [[TMP80:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 2 +// CHECK7-NEXT: store i8* null, i8** [[TMP80]], align 4 +// CHECK7-NEXT: [[TMP81:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], 
i32 0, i32 3 +// CHECK7-NEXT: [[TMP82:%.*]] = bitcast i8** [[TMP81]] to float** +// CHECK7-NEXT: store float* [[VLA]], float** [[TMP82]], align 4 +// CHECK7-NEXT: [[TMP83:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 3 +// CHECK7-NEXT: [[TMP84:%.*]] = bitcast i8** [[TMP83]] to float** +// CHECK7-NEXT: store float* [[VLA]], float** [[TMP84]], align 4 +// CHECK7-NEXT: store i64 [[TMP62]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_sizes.8, i32 0, i32 3), align 4 +// CHECK7-NEXT: [[TMP85:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 3 +// CHECK7-NEXT: store i8* null, i8** [[TMP85]], align 4 +// CHECK7-NEXT: [[TMP86:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 4 +// CHECK7-NEXT: [[TMP87:%.*]] = bitcast i8** [[TMP86]] to [5 x [10 x double]]** +// CHECK7-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP87]], align 4 +// CHECK7-NEXT: [[TMP88:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 4 +// CHECK7-NEXT: [[TMP89:%.*]] = bitcast i8** [[TMP88]] to [5 x [10 x double]]** +// CHECK7-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP89]], align 4 +// CHECK7-NEXT: [[TMP90:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 4 +// CHECK7-NEXT: store i8* null, i8** [[TMP90]], align 4 +// CHECK7-NEXT: [[TMP91:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 5 +// CHECK7-NEXT: [[TMP92:%.*]] = bitcast i8** [[TMP91]] to i32* +// CHECK7-NEXT: store i32 5, i32* [[TMP92]], align 4 +// CHECK7-NEXT: [[TMP93:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 5 +// CHECK7-NEXT: [[TMP94:%.*]] = bitcast i8** [[TMP93]] to i32* +// CHECK7-NEXT: store i32 5, i32* [[TMP94]], align 4 +// CHECK7-NEXT: [[TMP95:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 5 // CHECK7-NEXT: store i8* null, i8** [[TMP95]], align 4 -// CHECK7-NEXT: [[TMP96:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 5 +// CHECK7-NEXT: [[TMP96:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 6 // CHECK7-NEXT: [[TMP97:%.*]] = bitcast i8** [[TMP96]] to i32* -// CHECK7-NEXT: store i32 5, i32* [[TMP97]], align 4 -// CHECK7-NEXT: [[TMP98:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 5 +// CHECK7-NEXT: store i32 [[TMP3]], i32* [[TMP97]], align 4 +// CHECK7-NEXT: [[TMP98:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 6 // CHECK7-NEXT: [[TMP99:%.*]] = bitcast i8** [[TMP98]] to i32* -// CHECK7-NEXT: store i32 5, i32* [[TMP99]], align 4 -// CHECK7-NEXT: [[TMP100:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5 -// CHECK7-NEXT: store i64 4, i64* [[TMP100]], align 4 -// CHECK7-NEXT: [[TMP101:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 5 -// CHECK7-NEXT: store i8* null, i8** [[TMP101]], align 4 -// CHECK7-NEXT: [[TMP102:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 6 -// CHECK7-NEXT: [[TMP103:%.*]] = bitcast i8** [[TMP102]] to i32* -// CHECK7-NEXT: store i32 [[TMP3]], i32* [[TMP103]], align 4 -// CHECK7-NEXT: [[TMP104:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* 
[[DOTOFFLOAD_PTRS15]], i32 0, i32 6 -// CHECK7-NEXT: [[TMP105:%.*]] = bitcast i8** [[TMP104]] to i32* -// CHECK7-NEXT: store i32 [[TMP3]], i32* [[TMP105]], align 4 -// CHECK7-NEXT: [[TMP106:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6 -// CHECK7-NEXT: store i64 4, i64* [[TMP106]], align 4 -// CHECK7-NEXT: [[TMP107:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 6 -// CHECK7-NEXT: store i8* null, i8** [[TMP107]], align 4 -// CHECK7-NEXT: [[TMP108:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 7 -// CHECK7-NEXT: [[TMP109:%.*]] = bitcast i8** [[TMP108]] to double** -// CHECK7-NEXT: store double* [[VLA1]], double** [[TMP109]], align 4 -// CHECK7-NEXT: [[TMP110:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 7 -// CHECK7-NEXT: [[TMP111:%.*]] = bitcast i8** [[TMP110]] to double** -// CHECK7-NEXT: store double* [[VLA1]], double** [[TMP111]], align 4 -// CHECK7-NEXT: [[TMP112:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7 -// CHECK7-NEXT: store i64 [[TMP65]], i64* [[TMP112]], align 4 -// CHECK7-NEXT: [[TMP113:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 7 -// CHECK7-NEXT: store i8* null, i8** [[TMP113]], align 4 -// CHECK7-NEXT: [[TMP114:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 8 -// CHECK7-NEXT: [[TMP115:%.*]] = bitcast i8** [[TMP114]] to %struct.TT** -// CHECK7-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP115]], align 4 -// CHECK7-NEXT: [[TMP116:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 8 -// CHECK7-NEXT: [[TMP117:%.*]] = bitcast i8** [[TMP116]] to %struct.TT** -// CHECK7-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP117]], align 4 -// CHECK7-NEXT: [[TMP118:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 8 -// CHECK7-NEXT: store i64 12, i64* [[TMP118]], align 4 -// CHECK7-NEXT: [[TMP119:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 8 -// CHECK7-NEXT: store i8* null, i8** [[TMP119]], align 4 -// CHECK7-NEXT: [[TMP120:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 9 -// CHECK7-NEXT: [[TMP121:%.*]] = bitcast i8** [[TMP120]] to i32* -// CHECK7-NEXT: store i32 [[TMP59]], i32* [[TMP121]], align 4 -// CHECK7-NEXT: [[TMP122:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 9 -// CHECK7-NEXT: [[TMP123:%.*]] = bitcast i8** [[TMP122]] to i32* -// CHECK7-NEXT: store i32 [[TMP59]], i32* [[TMP123]], align 4 -// CHECK7-NEXT: [[TMP124:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 9 -// CHECK7-NEXT: store i64 4, i64* [[TMP124]], align 4 -// CHECK7-NEXT: [[TMP125:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 9 -// CHECK7-NEXT: store i8* null, i8** [[TMP125]], align 4 -// CHECK7-NEXT: [[TMP126:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 0 -// CHECK7-NEXT: [[TMP127:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 0 -// CHECK7-NEXT: [[TMP128:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK7-NEXT: [[TMP129:%.*]] = call i32 
@__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140.region_id, i32 10, i8** [[TMP126]], i8** [[TMP127]], i64* [[TMP128]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_maptypes.8, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) -// CHECK7-NEXT: [[TMP130:%.*]] = icmp ne i32 [[TMP129]], 0 -// CHECK7-NEXT: br i1 [[TMP130]], label [[OMP_OFFLOAD_FAILED17:%.*]], label [[OMP_OFFLOAD_CONT18:%.*]] +// CHECK7-NEXT: store i32 [[TMP3]], i32* [[TMP99]], align 4 +// CHECK7-NEXT: [[TMP100:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 6 +// CHECK7-NEXT: store i8* null, i8** [[TMP100]], align 4 +// CHECK7-NEXT: [[TMP101:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 7 +// CHECK7-NEXT: [[TMP102:%.*]] = bitcast i8** [[TMP101]] to double** +// CHECK7-NEXT: store double* [[VLA1]], double** [[TMP102]], align 4 +// CHECK7-NEXT: [[TMP103:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 7 +// CHECK7-NEXT: [[TMP104:%.*]] = bitcast i8** [[TMP103]] to double** +// CHECK7-NEXT: store double* [[VLA1]], double** [[TMP104]], align 4 +// CHECK7-NEXT: store i64 [[TMP65]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_sizes.8, i32 0, i32 7), align 4 +// CHECK7-NEXT: [[TMP105:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 7 +// CHECK7-NEXT: store i8* null, i8** [[TMP105]], align 4 +// CHECK7-NEXT: [[TMP106:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 8 +// CHECK7-NEXT: [[TMP107:%.*]] = bitcast i8** [[TMP106]] to %struct.TT** +// CHECK7-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP107]], align 4 +// CHECK7-NEXT: [[TMP108:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 8 +// CHECK7-NEXT: [[TMP109:%.*]] = bitcast i8** [[TMP108]] to %struct.TT** +// CHECK7-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP109]], align 4 +// CHECK7-NEXT: [[TMP110:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 8 +// CHECK7-NEXT: store i8* null, i8** [[TMP110]], align 4 +// CHECK7-NEXT: [[TMP111:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 9 +// CHECK7-NEXT: [[TMP112:%.*]] = bitcast i8** [[TMP111]] to i32* +// CHECK7-NEXT: store i32 [[TMP59]], i32* [[TMP112]], align 4 +// CHECK7-NEXT: [[TMP113:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 9 +// CHECK7-NEXT: [[TMP114:%.*]] = bitcast i8** [[TMP113]] to i32* +// CHECK7-NEXT: store i32 [[TMP59]], i32* [[TMP114]], align 4 +// CHECK7-NEXT: [[TMP115:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 9 +// CHECK7-NEXT: store i8* null, i8** [[TMP115]], align 4 +// CHECK7-NEXT: [[TMP116:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 0 +// CHECK7-NEXT: [[TMP117:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 0 +// CHECK7-NEXT: [[TMP118:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140.region_id, i32 10, i8** [[TMP116]], i8** [[TMP117]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([10 x i64], [10 x i64]* 
@.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) +// CHECK7-NEXT: [[TMP119:%.*]] = icmp ne i32 [[TMP118]], 0 +// CHECK7-NEXT: br i1 [[TMP119]], label [[OMP_OFFLOAD_FAILED17:%.*]], label [[OMP_OFFLOAD_CONT18:%.*]] // CHECK7: omp_offload.failed17: // CHECK7-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140(i32 [[TMP57]], [10 x float]* [[B]], i32 [[TMP1]], float* [[VLA]], [5 x [10 x double]]* [[C]], i32 5, i32 [[TMP3]], double* [[VLA1]], %struct.TT* [[D]], i32 [[TMP59]]) #[[ATTR4]] // CHECK7-NEXT: br label [[OMP_OFFLOAD_CONT18]] @@ -10185,10 +9975,10 @@ // CHECK7-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140(i32 [[TMP57]], [10 x float]* [[B]], i32 [[TMP1]], float* [[VLA]], [5 x [10 x double]]* [[C]], i32 5, i32 [[TMP3]], double* [[VLA1]], %struct.TT* [[D]], i32 [[TMP59]]) #[[ATTR4]] // CHECK7-NEXT: br label [[OMP_IF_END20]] // CHECK7: omp_if.end20: -// CHECK7-NEXT: [[TMP131:%.*]] = load i32, i32* [[A]], align 4 -// CHECK7-NEXT: [[TMP132:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK7-NEXT: call void @llvm.stackrestore(i8* [[TMP132]]) -// CHECK7-NEXT: ret i32 [[TMP131]] +// CHECK7-NEXT: [[TMP120:%.*]] = load i32, i32* [[A]], align 4 +// CHECK7-NEXT: [[TMP121:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK7-NEXT: call void @llvm.stackrestore(i8* [[TMP121]]) +// CHECK7-NEXT: ret i32 [[TMP120]] // // // CHECK7-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96 @@ -10898,7 +10688,6 @@ // CHECK7-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [6 x i8*], align 4 // CHECK7-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [6 x i8*], align 4 // CHECK7-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [6 x i8*], align 4 -// CHECK7-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [6 x i64], align 4 // CHECK7-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 // CHECK7-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 // CHECK7-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 @@ -10938,69 +10727,57 @@ // CHECK7-NEXT: [[TMP15:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK7-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to double** // CHECK7-NEXT: store double* [[A]], double** [[TMP16]], align 4 -// CHECK7-NEXT: [[TMP17:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK7-NEXT: store i64 8, i64* [[TMP17]], align 4 -// CHECK7-NEXT: [[TMP18:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK7-NEXT: store i8* null, i8** [[TMP18]], align 4 -// CHECK7-NEXT: [[TMP19:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK7-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i32* -// CHECK7-NEXT: store i32 [[TMP6]], i32* [[TMP20]], align 4 -// CHECK7-NEXT: [[TMP21:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK7-NEXT: [[TMP22:%.*]] = bitcast i8** [[TMP21]] to i32* -// CHECK7-NEXT: store i32 [[TMP6]], i32* [[TMP22]], align 4 -// CHECK7-NEXT: [[TMP23:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK7-NEXT: store i64 4, i64* [[TMP23]], align 4 -// CHECK7-NEXT: [[TMP24:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK7-NEXT: store i8* null, i8** [[TMP24]], align 4 -// CHECK7-NEXT: [[TMP25:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 
0, i32 2 +// CHECK7-NEXT: [[TMP17:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK7-NEXT: store i8* null, i8** [[TMP17]], align 4 +// CHECK7-NEXT: [[TMP18:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK7-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32* +// CHECK7-NEXT: store i32 [[TMP6]], i32* [[TMP19]], align 4 +// CHECK7-NEXT: [[TMP20:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK7-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32* +// CHECK7-NEXT: store i32 [[TMP6]], i32* [[TMP21]], align 4 +// CHECK7-NEXT: [[TMP22:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK7-NEXT: store i8* null, i8** [[TMP22]], align 4 +// CHECK7-NEXT: [[TMP23:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK7-NEXT: [[TMP24:%.*]] = bitcast i8** [[TMP23]] to i32* +// CHECK7-NEXT: store i32 2, i32* [[TMP24]], align 4 +// CHECK7-NEXT: [[TMP25:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK7-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i32* // CHECK7-NEXT: store i32 2, i32* [[TMP26]], align 4 -// CHECK7-NEXT: [[TMP27:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK7-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i32* -// CHECK7-NEXT: store i32 2, i32* [[TMP28]], align 4 -// CHECK7-NEXT: [[TMP29:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK7-NEXT: store i64 4, i64* [[TMP29]], align 4 -// CHECK7-NEXT: [[TMP30:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK7-NEXT: store i8* null, i8** [[TMP30]], align 4 -// CHECK7-NEXT: [[TMP31:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK7-NEXT: [[TMP32:%.*]] = bitcast i8** [[TMP31]] to i32* -// CHECK7-NEXT: store i32 [[TMP1]], i32* [[TMP32]], align 4 -// CHECK7-NEXT: [[TMP33:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK7-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i32* -// CHECK7-NEXT: store i32 [[TMP1]], i32* [[TMP34]], align 4 -// CHECK7-NEXT: [[TMP35:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK7-NEXT: store i64 4, i64* [[TMP35]], align 4 -// CHECK7-NEXT: [[TMP36:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 -// CHECK7-NEXT: store i8* null, i8** [[TMP36]], align 4 -// CHECK7-NEXT: [[TMP37:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK7-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i16** -// CHECK7-NEXT: store i16* [[VLA]], i16** [[TMP38]], align 4 -// CHECK7-NEXT: [[TMP39:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK7-NEXT: [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i16** -// CHECK7-NEXT: store i16* [[VLA]], i16** [[TMP40]], align 4 -// CHECK7-NEXT: [[TMP41:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK7-NEXT: store i64 [[TMP12]], i64* [[TMP41]], align 4 -// CHECK7-NEXT: [[TMP42:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 +// CHECK7-NEXT: [[TMP27:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], 
i32 0, i32 2 +// CHECK7-NEXT: store i8* null, i8** [[TMP27]], align 4 +// CHECK7-NEXT: [[TMP28:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK7-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i32* +// CHECK7-NEXT: store i32 [[TMP1]], i32* [[TMP29]], align 4 +// CHECK7-NEXT: [[TMP30:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK7-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i32* +// CHECK7-NEXT: store i32 [[TMP1]], i32* [[TMP31]], align 4 +// CHECK7-NEXT: [[TMP32:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 +// CHECK7-NEXT: store i8* null, i8** [[TMP32]], align 4 +// CHECK7-NEXT: [[TMP33:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK7-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i16** +// CHECK7-NEXT: store i16* [[VLA]], i16** [[TMP34]], align 4 +// CHECK7-NEXT: [[TMP35:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK7-NEXT: [[TMP36:%.*]] = bitcast i8** [[TMP35]] to i16** +// CHECK7-NEXT: store i16* [[VLA]], i16** [[TMP36]], align 4 +// CHECK7-NEXT: store i64 [[TMP12]], i64* getelementptr inbounds ([6 x i64], [6 x i64]* @.offload_sizes.11, i32 0, i32 4), align 4 +// CHECK7-NEXT: [[TMP37:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 +// CHECK7-NEXT: store i8* null, i8** [[TMP37]], align 4 +// CHECK7-NEXT: [[TMP38:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 5 +// CHECK7-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i32* +// CHECK7-NEXT: store i32 [[TMP8]], i32* [[TMP39]], align 4 +// CHECK7-NEXT: [[TMP40:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 5 +// CHECK7-NEXT: [[TMP41:%.*]] = bitcast i8** [[TMP40]] to i32* +// CHECK7-NEXT: store i32 [[TMP8]], i32* [[TMP41]], align 4 +// CHECK7-NEXT: [[TMP42:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 5 // CHECK7-NEXT: store i8* null, i8** [[TMP42]], align 4 -// CHECK7-NEXT: [[TMP43:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 5 -// CHECK7-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32* -// CHECK7-NEXT: store i32 [[TMP8]], i32* [[TMP44]], align 4 -// CHECK7-NEXT: [[TMP45:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 5 -// CHECK7-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32* -// CHECK7-NEXT: store i32 [[TMP8]], i32* [[TMP46]], align 4 -// CHECK7-NEXT: [[TMP47:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5 -// CHECK7-NEXT: store i64 1, i64* [[TMP47]], align 4 -// CHECK7-NEXT: [[TMP48:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 5 -// CHECK7-NEXT: store i8* null, i8** [[TMP48]], align 4 -// CHECK7-NEXT: [[TMP49:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK7-NEXT: [[TMP50:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK7-NEXT: [[TMP51:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK7-NEXT: [[TMP52:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1 -// CHECK7-NEXT: [[TOBOOL4:%.*]] = trunc i8 [[TMP52]] to i1 -// CHECK7-NEXT: [[TMP53:%.*]] = select i1 [[TOBOOL4]], i32 0, i32 1 -// CHECK7-NEXT: [[TMP54:%.*]] = 
call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l214.region_id, i32 6, i8** [[TMP49]], i8** [[TMP50]], i64* [[TMP51]], i64* getelementptr inbounds ([6 x i64], [6 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 1, i32 [[TMP53]]) -// CHECK7-NEXT: [[TMP55:%.*]] = icmp ne i32 [[TMP54]], 0 -// CHECK7-NEXT: br i1 [[TMP55]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK7-NEXT: [[TMP43:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK7-NEXT: [[TMP44:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK7-NEXT: [[TMP45:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1 +// CHECK7-NEXT: [[TOBOOL4:%.*]] = trunc i8 [[TMP45]] to i1 +// CHECK7-NEXT: [[TMP46:%.*]] = select i1 [[TOBOOL4]], i32 0, i32 1 +// CHECK7-NEXT: [[TMP47:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l214.region_id, i32 6, i8** [[TMP43]], i8** [[TMP44]], i64* getelementptr inbounds ([6 x i64], [6 x i64]* @.offload_sizes.11, i32 0, i32 0), i64* getelementptr inbounds ([6 x i64], [6 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 1, i32 [[TMP46]]) +// CHECK7-NEXT: [[TMP48:%.*]] = icmp ne i32 [[TMP47]], 0 +// CHECK7-NEXT: br i1 [[TMP48]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK7: omp_offload.failed: // CHECK7-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l214(%struct.S1* [[THIS1]], i32 [[TMP6]], i32 2, i32 [[TMP1]], i16* [[VLA]], i32 [[TMP8]]) #[[ATTR4]] // CHECK7-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -11010,15 +10787,15 @@ // CHECK7-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l214(%struct.S1* [[THIS1]], i32 [[TMP6]], i32 2, i32 [[TMP1]], i16* [[VLA]], i32 [[TMP8]]) #[[ATTR4]] // CHECK7-NEXT: br label [[OMP_IF_END]] // CHECK7: omp_if.end: -// CHECK7-NEXT: [[TMP56:%.*]] = mul nsw i32 1, [[TMP1]] -// CHECK7-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP56]] +// CHECK7-NEXT: [[TMP49:%.*]] = mul nsw i32 1, [[TMP1]] +// CHECK7-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP49]] // CHECK7-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1 -// CHECK7-NEXT: [[TMP57:%.*]] = load i16, i16* [[ARRAYIDX5]], align 2 -// CHECK7-NEXT: [[CONV6:%.*]] = sext i16 [[TMP57]] to i32 -// CHECK7-NEXT: [[TMP58:%.*]] = load i32, i32* [[B]], align 4 -// CHECK7-NEXT: [[ADD7:%.*]] = add nsw i32 [[CONV6]], [[TMP58]] -// CHECK7-NEXT: [[TMP59:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK7-NEXT: call void @llvm.stackrestore(i8* [[TMP59]]) +// CHECK7-NEXT: [[TMP50:%.*]] = load i16, i16* [[ARRAYIDX5]], align 2 +// CHECK7-NEXT: [[CONV6:%.*]] = sext i16 [[TMP50]] to i32 +// CHECK7-NEXT: [[TMP51:%.*]] = load i32, i32* [[B]], align 4 +// CHECK7-NEXT: [[ADD7:%.*]] = add nsw i32 [[CONV6]], [[TMP51]] +// CHECK7-NEXT: [[TMP52:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK7-NEXT: call void @llvm.stackrestore(i8* [[TMP52]]) // CHECK7-NEXT: ret i32 [[ADD7]] // // @@ -11089,7 +10866,7 @@ // CHECK7-NEXT: store i8* null, i8** [[TMP26]], align 4 // CHECK7-NEXT: [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK7-NEXT: [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], 
i32 0, i32 0 -// CHECK7-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) +// CHECK7-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.14, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.15, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) // CHECK7-NEXT: [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0 // CHECK7-NEXT: br i1 [[TMP30]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK7: omp_offload.failed: @@ -11157,7 +10934,7 @@ // CHECK7-NEXT: store i8* null, i8** [[TMP19]], align 4 // CHECK7-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK7-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK7-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.15, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) +// CHECK7-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.17, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.18, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) // CHECK7-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0 // CHECK7-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK7: omp_offload.failed: @@ -11211,20 +10988,20 @@ // CHECK7-NEXT: [[TOBOOL4:%.*]] = trunc i8 [[TMP9]] to i1 // CHECK7-NEXT: br i1 [[TOBOOL4]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK7: omp_if.then: -// CHECK7-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*, i32)* @.omp_outlined..9 to void (i32*, i32*, ...)*), %struct.S1* [[TMP1]], i32 [[TMP6]], i32 [[TMP2]], i32 [[TMP3]], i16* [[TMP4]], i32 [[TMP8]]) +// CHECK7-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*, i32)* @.omp_outlined..10 to void (i32*, i32*, ...)*), %struct.S1* [[TMP1]], i32 [[TMP6]], i32 [[TMP2]], i32 [[TMP3]], i16* [[TMP4]], i32 [[TMP8]]) // CHECK7-NEXT: br label [[OMP_IF_END:%.*]] // CHECK7: omp_if.else: // CHECK7-NEXT: call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]]) // CHECK7-NEXT: store i32 [[TMP0]], i32* [[DOTTHREADID_TEMP_]], align 4 // CHECK7-NEXT: store i32 0, i32* [[DOTBOUND_ZERO_ADDR]], align 4 -// CHECK7-NEXT: call void @.omp_outlined..9(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTBOUND_ZERO_ADDR]], %struct.S1* [[TMP1]], i32 [[TMP6]], i32 [[TMP2]], i32 [[TMP3]], i16* [[TMP4]], i32 [[TMP8]]) #[[ATTR4]] +// CHECK7-NEXT: call void @.omp_outlined..10(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTBOUND_ZERO_ADDR]], %struct.S1* [[TMP1]], i32 [[TMP6]], i32 [[TMP2]], i32 [[TMP3]], i16* [[TMP4]], i32 [[TMP8]]) #[[ATTR4]] // CHECK7-NEXT: call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]]) // CHECK7-NEXT: br label [[OMP_IF_END]] // CHECK7: omp_if.end: // CHECK7-NEXT: ret void // // -// CHECK7-LABEL: define {{[^@]+}}@.omp_outlined..9 +// CHECK7-LABEL: define {{[^@]+}}@.omp_outlined..10 // CHECK7-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i32 noundef [[B:%.*]], i32 noundef [[VLA:%.*]], i32 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] { // CHECK7-NEXT: entry: // CHECK7-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -11410,11 +11187,11 @@ // CHECK7-NEXT: [[CONV3:%.*]] = bitcast i32* [[AAA_CASTED]] to i8* // CHECK7-NEXT: store i8 [[TMP5]], i8* [[CONV3]], align 1 // CHECK7-NEXT: [[TMP6:%.*]] = load i32, i32* [[AAA_CASTED]], align 4 -// CHECK7-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, [10 x i32]*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], [10 x i32]* [[TMP0]]) +// CHECK7-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, [10 x i32]*)* @.omp_outlined..13 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], [10 x i32]* [[TMP0]]) // CHECK7-NEXT: ret void // // -// CHECK7-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK7-LABEL: define {{[^@]+}}@.omp_outlined..13 // CHECK7-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], i32 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] { // CHECK7-NEXT: entry: // CHECK7-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -11457,11 +11234,11 @@ // CHECK7-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16* // CHECK7-NEXT: store i16 [[TMP3]], i16* [[CONV1]], align 2 // CHECK7-NEXT: [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4 -// CHECK7-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]]) +// CHECK7-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]]) // CHECK7-NEXT: ret void // // -// CHECK7-LABEL: define {{[^@]+}}@.omp_outlined..14 +// CHECK7-LABEL: define {{[^@]+}}@.omp_outlined..16 // CHECK7-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] { // CHECK7-NEXT: entry: // CHECK7-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -11594,7 +11371,6 @@ // CHECK8-NEXT: [[DOTOFFLOAD_BASEPTRS14:%.*]] = alloca [10 x i8*], align 4 // CHECK8-NEXT: [[DOTOFFLOAD_PTRS15:%.*]] = alloca [10 x i8*], align 4 // CHECK8-NEXT: [[DOTOFFLOAD_MAPPERS16:%.*]] = alloca [10 x i8*], align 4 -// CHECK8-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [10 x i64], align 4 // CHECK8-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]]) // CHECK8-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 // CHECK8-NEXT: store i32 0, i32* [[A]], align 4 @@ -11726,106 +11502,87 @@ // CHECK8-NEXT: [[TMP68:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 0 // CHECK8-NEXT: [[TMP69:%.*]] = bitcast i8** [[TMP68]] to i32* // CHECK8-NEXT: store i32 [[TMP57]], i32* [[TMP69]], align 4 -// CHECK8-NEXT: [[TMP70:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK8-NEXT: store i64 4, i64* [[TMP70]], align 4 -// CHECK8-NEXT: [[TMP71:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 0 -// CHECK8-NEXT: store i8* null, i8** [[TMP71]], align 4 -// CHECK8-NEXT: [[TMP72:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 1 -// CHECK8-NEXT: [[TMP73:%.*]] = bitcast i8** [[TMP72]] to [10 x float]** -// CHECK8-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP73]], align 4 -// CHECK8-NEXT: [[TMP74:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 1 -// CHECK8-NEXT: [[TMP75:%.*]] = bitcast i8** [[TMP74]] to [10 x float]** -// CHECK8-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP75]], align 4 -// CHECK8-NEXT: [[TMP76:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK8-NEXT: store i64 40, i64* [[TMP76]], align 4 -// CHECK8-NEXT: [[TMP77:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 1 -// CHECK8-NEXT: store i8* null, i8** [[TMP77]], align 4 -// CHECK8-NEXT: [[TMP78:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 2 +// CHECK8-NEXT: [[TMP70:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 0 +// CHECK8-NEXT: store i8* null, i8** [[TMP70]], align 4 +// CHECK8-NEXT: [[TMP71:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 1 +// CHECK8-NEXT: [[TMP72:%.*]] = bitcast i8** [[TMP71]] to [10 x float]** +// CHECK8-NEXT: store [10 x float]* [[B]], [10 
x float]** [[TMP72]], align 4 +// CHECK8-NEXT: [[TMP73:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 1 +// CHECK8-NEXT: [[TMP74:%.*]] = bitcast i8** [[TMP73]] to [10 x float]** +// CHECK8-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP74]], align 4 +// CHECK8-NEXT: [[TMP75:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 1 +// CHECK8-NEXT: store i8* null, i8** [[TMP75]], align 4 +// CHECK8-NEXT: [[TMP76:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 2 +// CHECK8-NEXT: [[TMP77:%.*]] = bitcast i8** [[TMP76]] to i32* +// CHECK8-NEXT: store i32 [[TMP1]], i32* [[TMP77]], align 4 +// CHECK8-NEXT: [[TMP78:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 2 // CHECK8-NEXT: [[TMP79:%.*]] = bitcast i8** [[TMP78]] to i32* // CHECK8-NEXT: store i32 [[TMP1]], i32* [[TMP79]], align 4 -// CHECK8-NEXT: [[TMP80:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 2 -// CHECK8-NEXT: [[TMP81:%.*]] = bitcast i8** [[TMP80]] to i32* -// CHECK8-NEXT: store i32 [[TMP1]], i32* [[TMP81]], align 4 -// CHECK8-NEXT: [[TMP82:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK8-NEXT: store i64 4, i64* [[TMP82]], align 4 -// CHECK8-NEXT: [[TMP83:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 2 -// CHECK8-NEXT: store i8* null, i8** [[TMP83]], align 4 -// CHECK8-NEXT: [[TMP84:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 3 -// CHECK8-NEXT: [[TMP85:%.*]] = bitcast i8** [[TMP84]] to float** -// CHECK8-NEXT: store float* [[VLA]], float** [[TMP85]], align 4 -// CHECK8-NEXT: [[TMP86:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 3 -// CHECK8-NEXT: [[TMP87:%.*]] = bitcast i8** [[TMP86]] to float** -// CHECK8-NEXT: store float* [[VLA]], float** [[TMP87]], align 4 -// CHECK8-NEXT: [[TMP88:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK8-NEXT: store i64 [[TMP62]], i64* [[TMP88]], align 4 -// CHECK8-NEXT: [[TMP89:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 3 -// CHECK8-NEXT: store i8* null, i8** [[TMP89]], align 4 -// CHECK8-NEXT: [[TMP90:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 4 -// CHECK8-NEXT: [[TMP91:%.*]] = bitcast i8** [[TMP90]] to [5 x [10 x double]]** -// CHECK8-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP91]], align 4 -// CHECK8-NEXT: [[TMP92:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 4 -// CHECK8-NEXT: [[TMP93:%.*]] = bitcast i8** [[TMP92]] to [5 x [10 x double]]** -// CHECK8-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP93]], align 4 -// CHECK8-NEXT: [[TMP94:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK8-NEXT: store i64 400, i64* [[TMP94]], align 4 -// CHECK8-NEXT: [[TMP95:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 4 +// CHECK8-NEXT: [[TMP80:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 2 +// CHECK8-NEXT: store i8* null, i8** [[TMP80]], align 4 +// CHECK8-NEXT: [[TMP81:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], 
i32 0, i32 3 +// CHECK8-NEXT: [[TMP82:%.*]] = bitcast i8** [[TMP81]] to float** +// CHECK8-NEXT: store float* [[VLA]], float** [[TMP82]], align 4 +// CHECK8-NEXT: [[TMP83:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 3 +// CHECK8-NEXT: [[TMP84:%.*]] = bitcast i8** [[TMP83]] to float** +// CHECK8-NEXT: store float* [[VLA]], float** [[TMP84]], align 4 +// CHECK8-NEXT: store i64 [[TMP62]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_sizes.8, i32 0, i32 3), align 4 +// CHECK8-NEXT: [[TMP85:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 3 +// CHECK8-NEXT: store i8* null, i8** [[TMP85]], align 4 +// CHECK8-NEXT: [[TMP86:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 4 +// CHECK8-NEXT: [[TMP87:%.*]] = bitcast i8** [[TMP86]] to [5 x [10 x double]]** +// CHECK8-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP87]], align 4 +// CHECK8-NEXT: [[TMP88:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 4 +// CHECK8-NEXT: [[TMP89:%.*]] = bitcast i8** [[TMP88]] to [5 x [10 x double]]** +// CHECK8-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP89]], align 4 +// CHECK8-NEXT: [[TMP90:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 4 +// CHECK8-NEXT: store i8* null, i8** [[TMP90]], align 4 +// CHECK8-NEXT: [[TMP91:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 5 +// CHECK8-NEXT: [[TMP92:%.*]] = bitcast i8** [[TMP91]] to i32* +// CHECK8-NEXT: store i32 5, i32* [[TMP92]], align 4 +// CHECK8-NEXT: [[TMP93:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 5 +// CHECK8-NEXT: [[TMP94:%.*]] = bitcast i8** [[TMP93]] to i32* +// CHECK8-NEXT: store i32 5, i32* [[TMP94]], align 4 +// CHECK8-NEXT: [[TMP95:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 5 // CHECK8-NEXT: store i8* null, i8** [[TMP95]], align 4 -// CHECK8-NEXT: [[TMP96:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 5 +// CHECK8-NEXT: [[TMP96:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 6 // CHECK8-NEXT: [[TMP97:%.*]] = bitcast i8** [[TMP96]] to i32* -// CHECK8-NEXT: store i32 5, i32* [[TMP97]], align 4 -// CHECK8-NEXT: [[TMP98:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 5 +// CHECK8-NEXT: store i32 [[TMP3]], i32* [[TMP97]], align 4 +// CHECK8-NEXT: [[TMP98:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 6 // CHECK8-NEXT: [[TMP99:%.*]] = bitcast i8** [[TMP98]] to i32* -// CHECK8-NEXT: store i32 5, i32* [[TMP99]], align 4 -// CHECK8-NEXT: [[TMP100:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5 -// CHECK8-NEXT: store i64 4, i64* [[TMP100]], align 4 -// CHECK8-NEXT: [[TMP101:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 5 -// CHECK8-NEXT: store i8* null, i8** [[TMP101]], align 4 -// CHECK8-NEXT: [[TMP102:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 6 -// CHECK8-NEXT: [[TMP103:%.*]] = bitcast i8** [[TMP102]] to i32* -// CHECK8-NEXT: store i32 [[TMP3]], i32* [[TMP103]], align 4 -// CHECK8-NEXT: [[TMP104:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* 
[[DOTOFFLOAD_PTRS15]], i32 0, i32 6 -// CHECK8-NEXT: [[TMP105:%.*]] = bitcast i8** [[TMP104]] to i32* -// CHECK8-NEXT: store i32 [[TMP3]], i32* [[TMP105]], align 4 -// CHECK8-NEXT: [[TMP106:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6 -// CHECK8-NEXT: store i64 4, i64* [[TMP106]], align 4 -// CHECK8-NEXT: [[TMP107:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 6 -// CHECK8-NEXT: store i8* null, i8** [[TMP107]], align 4 -// CHECK8-NEXT: [[TMP108:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 7 -// CHECK8-NEXT: [[TMP109:%.*]] = bitcast i8** [[TMP108]] to double** -// CHECK8-NEXT: store double* [[VLA1]], double** [[TMP109]], align 4 -// CHECK8-NEXT: [[TMP110:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 7 -// CHECK8-NEXT: [[TMP111:%.*]] = bitcast i8** [[TMP110]] to double** -// CHECK8-NEXT: store double* [[VLA1]], double** [[TMP111]], align 4 -// CHECK8-NEXT: [[TMP112:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7 -// CHECK8-NEXT: store i64 [[TMP65]], i64* [[TMP112]], align 4 -// CHECK8-NEXT: [[TMP113:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 7 -// CHECK8-NEXT: store i8* null, i8** [[TMP113]], align 4 -// CHECK8-NEXT: [[TMP114:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 8 -// CHECK8-NEXT: [[TMP115:%.*]] = bitcast i8** [[TMP114]] to %struct.TT** -// CHECK8-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP115]], align 4 -// CHECK8-NEXT: [[TMP116:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 8 -// CHECK8-NEXT: [[TMP117:%.*]] = bitcast i8** [[TMP116]] to %struct.TT** -// CHECK8-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP117]], align 4 -// CHECK8-NEXT: [[TMP118:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 8 -// CHECK8-NEXT: store i64 12, i64* [[TMP118]], align 4 -// CHECK8-NEXT: [[TMP119:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 8 -// CHECK8-NEXT: store i8* null, i8** [[TMP119]], align 4 -// CHECK8-NEXT: [[TMP120:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 9 -// CHECK8-NEXT: [[TMP121:%.*]] = bitcast i8** [[TMP120]] to i32* -// CHECK8-NEXT: store i32 [[TMP59]], i32* [[TMP121]], align 4 -// CHECK8-NEXT: [[TMP122:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 9 -// CHECK8-NEXT: [[TMP123:%.*]] = bitcast i8** [[TMP122]] to i32* -// CHECK8-NEXT: store i32 [[TMP59]], i32* [[TMP123]], align 4 -// CHECK8-NEXT: [[TMP124:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 9 -// CHECK8-NEXT: store i64 4, i64* [[TMP124]], align 4 -// CHECK8-NEXT: [[TMP125:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 9 -// CHECK8-NEXT: store i8* null, i8** [[TMP125]], align 4 -// CHECK8-NEXT: [[TMP126:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 0 -// CHECK8-NEXT: [[TMP127:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 0 -// CHECK8-NEXT: [[TMP128:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK8-NEXT: [[TMP129:%.*]] = call i32 
@__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140.region_id, i32 10, i8** [[TMP126]], i8** [[TMP127]], i64* [[TMP128]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_maptypes.8, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) -// CHECK8-NEXT: [[TMP130:%.*]] = icmp ne i32 [[TMP129]], 0 -// CHECK8-NEXT: br i1 [[TMP130]], label [[OMP_OFFLOAD_FAILED17:%.*]], label [[OMP_OFFLOAD_CONT18:%.*]] +// CHECK8-NEXT: store i32 [[TMP3]], i32* [[TMP99]], align 4 +// CHECK8-NEXT: [[TMP100:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 6 +// CHECK8-NEXT: store i8* null, i8** [[TMP100]], align 4 +// CHECK8-NEXT: [[TMP101:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 7 +// CHECK8-NEXT: [[TMP102:%.*]] = bitcast i8** [[TMP101]] to double** +// CHECK8-NEXT: store double* [[VLA1]], double** [[TMP102]], align 4 +// CHECK8-NEXT: [[TMP103:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 7 +// CHECK8-NEXT: [[TMP104:%.*]] = bitcast i8** [[TMP103]] to double** +// CHECK8-NEXT: store double* [[VLA1]], double** [[TMP104]], align 4 +// CHECK8-NEXT: store i64 [[TMP65]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_sizes.8, i32 0, i32 7), align 4 +// CHECK8-NEXT: [[TMP105:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 7 +// CHECK8-NEXT: store i8* null, i8** [[TMP105]], align 4 +// CHECK8-NEXT: [[TMP106:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 8 +// CHECK8-NEXT: [[TMP107:%.*]] = bitcast i8** [[TMP106]] to %struct.TT** +// CHECK8-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP107]], align 4 +// CHECK8-NEXT: [[TMP108:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 8 +// CHECK8-NEXT: [[TMP109:%.*]] = bitcast i8** [[TMP108]] to %struct.TT** +// CHECK8-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP109]], align 4 +// CHECK8-NEXT: [[TMP110:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 8 +// CHECK8-NEXT: store i8* null, i8** [[TMP110]], align 4 +// CHECK8-NEXT: [[TMP111:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 9 +// CHECK8-NEXT: [[TMP112:%.*]] = bitcast i8** [[TMP111]] to i32* +// CHECK8-NEXT: store i32 [[TMP59]], i32* [[TMP112]], align 4 +// CHECK8-NEXT: [[TMP113:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 9 +// CHECK8-NEXT: [[TMP114:%.*]] = bitcast i8** [[TMP113]] to i32* +// CHECK8-NEXT: store i32 [[TMP59]], i32* [[TMP114]], align 4 +// CHECK8-NEXT: [[TMP115:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS16]], i32 0, i32 9 +// CHECK8-NEXT: store i8* null, i8** [[TMP115]], align 4 +// CHECK8-NEXT: [[TMP116:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS14]], i32 0, i32 0 +// CHECK8-NEXT: [[TMP117:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS15]], i32 0, i32 0 +// CHECK8-NEXT: [[TMP118:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140.region_id, i32 10, i8** [[TMP116]], i8** [[TMP117]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([10 x i64], [10 x i64]* 
@.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) +// CHECK8-NEXT: [[TMP119:%.*]] = icmp ne i32 [[TMP118]], 0 +// CHECK8-NEXT: br i1 [[TMP119]], label [[OMP_OFFLOAD_FAILED17:%.*]], label [[OMP_OFFLOAD_CONT18:%.*]] // CHECK8: omp_offload.failed17: // CHECK8-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140(i32 [[TMP57]], [10 x float]* [[B]], i32 [[TMP1]], float* [[VLA]], [5 x [10 x double]]* [[C]], i32 5, i32 [[TMP3]], double* [[VLA1]], %struct.TT* [[D]], i32 [[TMP59]]) #[[ATTR4]] // CHECK8-NEXT: br label [[OMP_OFFLOAD_CONT18]] @@ -11835,10 +11592,10 @@ // CHECK8-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l140(i32 [[TMP57]], [10 x float]* [[B]], i32 [[TMP1]], float* [[VLA]], [5 x [10 x double]]* [[C]], i32 5, i32 [[TMP3]], double* [[VLA1]], %struct.TT* [[D]], i32 [[TMP59]]) #[[ATTR4]] // CHECK8-NEXT: br label [[OMP_IF_END20]] // CHECK8: omp_if.end20: -// CHECK8-NEXT: [[TMP131:%.*]] = load i32, i32* [[A]], align 4 -// CHECK8-NEXT: [[TMP132:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK8-NEXT: call void @llvm.stackrestore(i8* [[TMP132]]) -// CHECK8-NEXT: ret i32 [[TMP131]] +// CHECK8-NEXT: [[TMP120:%.*]] = load i32, i32* [[A]], align 4 +// CHECK8-NEXT: [[TMP121:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK8-NEXT: call void @llvm.stackrestore(i8* [[TMP121]]) +// CHECK8-NEXT: ret i32 [[TMP120]] // // // CHECK8-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l96 @@ -12548,7 +12305,6 @@ // CHECK8-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [6 x i8*], align 4 // CHECK8-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [6 x i8*], align 4 // CHECK8-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [6 x i8*], align 4 -// CHECK8-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [6 x i64], align 4 // CHECK8-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 // CHECK8-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 // CHECK8-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 @@ -12588,69 +12344,57 @@ // CHECK8-NEXT: [[TMP15:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK8-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to double** // CHECK8-NEXT: store double* [[A]], double** [[TMP16]], align 4 -// CHECK8-NEXT: [[TMP17:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK8-NEXT: store i64 8, i64* [[TMP17]], align 4 -// CHECK8-NEXT: [[TMP18:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK8-NEXT: store i8* null, i8** [[TMP18]], align 4 -// CHECK8-NEXT: [[TMP19:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK8-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i32* -// CHECK8-NEXT: store i32 [[TMP6]], i32* [[TMP20]], align 4 -// CHECK8-NEXT: [[TMP21:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK8-NEXT: [[TMP22:%.*]] = bitcast i8** [[TMP21]] to i32* -// CHECK8-NEXT: store i32 [[TMP6]], i32* [[TMP22]], align 4 -// CHECK8-NEXT: [[TMP23:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK8-NEXT: store i64 4, i64* [[TMP23]], align 4 -// CHECK8-NEXT: [[TMP24:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK8-NEXT: store i8* null, i8** [[TMP24]], align 4 -// CHECK8-NEXT: [[TMP25:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 
0, i32 2 +// CHECK8-NEXT: [[TMP17:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK8-NEXT: store i8* null, i8** [[TMP17]], align 4 +// CHECK8-NEXT: [[TMP18:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK8-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32* +// CHECK8-NEXT: store i32 [[TMP6]], i32* [[TMP19]], align 4 +// CHECK8-NEXT: [[TMP20:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK8-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32* +// CHECK8-NEXT: store i32 [[TMP6]], i32* [[TMP21]], align 4 +// CHECK8-NEXT: [[TMP22:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK8-NEXT: store i8* null, i8** [[TMP22]], align 4 +// CHECK8-NEXT: [[TMP23:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK8-NEXT: [[TMP24:%.*]] = bitcast i8** [[TMP23]] to i32* +// CHECK8-NEXT: store i32 2, i32* [[TMP24]], align 4 +// CHECK8-NEXT: [[TMP25:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK8-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i32* // CHECK8-NEXT: store i32 2, i32* [[TMP26]], align 4 -// CHECK8-NEXT: [[TMP27:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK8-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i32* -// CHECK8-NEXT: store i32 2, i32* [[TMP28]], align 4 -// CHECK8-NEXT: [[TMP29:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK8-NEXT: store i64 4, i64* [[TMP29]], align 4 -// CHECK8-NEXT: [[TMP30:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK8-NEXT: store i8* null, i8** [[TMP30]], align 4 -// CHECK8-NEXT: [[TMP31:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK8-NEXT: [[TMP32:%.*]] = bitcast i8** [[TMP31]] to i32* -// CHECK8-NEXT: store i32 [[TMP1]], i32* [[TMP32]], align 4 -// CHECK8-NEXT: [[TMP33:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK8-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i32* -// CHECK8-NEXT: store i32 [[TMP1]], i32* [[TMP34]], align 4 -// CHECK8-NEXT: [[TMP35:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK8-NEXT: store i64 4, i64* [[TMP35]], align 4 -// CHECK8-NEXT: [[TMP36:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 -// CHECK8-NEXT: store i8* null, i8** [[TMP36]], align 4 -// CHECK8-NEXT: [[TMP37:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK8-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i16** -// CHECK8-NEXT: store i16* [[VLA]], i16** [[TMP38]], align 4 -// CHECK8-NEXT: [[TMP39:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK8-NEXT: [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i16** -// CHECK8-NEXT: store i16* [[VLA]], i16** [[TMP40]], align 4 -// CHECK8-NEXT: [[TMP41:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK8-NEXT: store i64 [[TMP12]], i64* [[TMP41]], align 4 -// CHECK8-NEXT: [[TMP42:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 +// CHECK8-NEXT: [[TMP27:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], 
i32 0, i32 2 +// CHECK8-NEXT: store i8* null, i8** [[TMP27]], align 4 +// CHECK8-NEXT: [[TMP28:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK8-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i32* +// CHECK8-NEXT: store i32 [[TMP1]], i32* [[TMP29]], align 4 +// CHECK8-NEXT: [[TMP30:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK8-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i32* +// CHECK8-NEXT: store i32 [[TMP1]], i32* [[TMP31]], align 4 +// CHECK8-NEXT: [[TMP32:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 +// CHECK8-NEXT: store i8* null, i8** [[TMP32]], align 4 +// CHECK8-NEXT: [[TMP33:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK8-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i16** +// CHECK8-NEXT: store i16* [[VLA]], i16** [[TMP34]], align 4 +// CHECK8-NEXT: [[TMP35:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK8-NEXT: [[TMP36:%.*]] = bitcast i8** [[TMP35]] to i16** +// CHECK8-NEXT: store i16* [[VLA]], i16** [[TMP36]], align 4 +// CHECK8-NEXT: store i64 [[TMP12]], i64* getelementptr inbounds ([6 x i64], [6 x i64]* @.offload_sizes.11, i32 0, i32 4), align 4 +// CHECK8-NEXT: [[TMP37:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 +// CHECK8-NEXT: store i8* null, i8** [[TMP37]], align 4 +// CHECK8-NEXT: [[TMP38:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 5 +// CHECK8-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i32* +// CHECK8-NEXT: store i32 [[TMP8]], i32* [[TMP39]], align 4 +// CHECK8-NEXT: [[TMP40:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 5 +// CHECK8-NEXT: [[TMP41:%.*]] = bitcast i8** [[TMP40]] to i32* +// CHECK8-NEXT: store i32 [[TMP8]], i32* [[TMP41]], align 4 +// CHECK8-NEXT: [[TMP42:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 5 // CHECK8-NEXT: store i8* null, i8** [[TMP42]], align 4 -// CHECK8-NEXT: [[TMP43:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 5 -// CHECK8-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32* -// CHECK8-NEXT: store i32 [[TMP8]], i32* [[TMP44]], align 4 -// CHECK8-NEXT: [[TMP45:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 5 -// CHECK8-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32* -// CHECK8-NEXT: store i32 [[TMP8]], i32* [[TMP46]], align 4 -// CHECK8-NEXT: [[TMP47:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5 -// CHECK8-NEXT: store i64 1, i64* [[TMP47]], align 4 -// CHECK8-NEXT: [[TMP48:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 5 -// CHECK8-NEXT: store i8* null, i8** [[TMP48]], align 4 -// CHECK8-NEXT: [[TMP49:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK8-NEXT: [[TMP50:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK8-NEXT: [[TMP51:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK8-NEXT: [[TMP52:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1 -// CHECK8-NEXT: [[TOBOOL4:%.*]] = trunc i8 [[TMP52]] to i1 -// CHECK8-NEXT: [[TMP53:%.*]] = select i1 [[TOBOOL4]], i32 0, i32 1 -// CHECK8-NEXT: [[TMP54:%.*]] = 
call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l214.region_id, i32 6, i8** [[TMP49]], i8** [[TMP50]], i64* [[TMP51]], i64* getelementptr inbounds ([6 x i64], [6 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 1, i32 [[TMP53]]) -// CHECK8-NEXT: [[TMP55:%.*]] = icmp ne i32 [[TMP54]], 0 -// CHECK8-NEXT: br i1 [[TMP55]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK8-NEXT: [[TMP43:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK8-NEXT: [[TMP44:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK8-NEXT: [[TMP45:%.*]] = load i8, i8* [[DOTCAPTURE_EXPR_]], align 1 +// CHECK8-NEXT: [[TOBOOL4:%.*]] = trunc i8 [[TMP45]] to i1 +// CHECK8-NEXT: [[TMP46:%.*]] = select i1 [[TOBOOL4]], i32 0, i32 1 +// CHECK8-NEXT: [[TMP47:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l214.region_id, i32 6, i8** [[TMP43]], i8** [[TMP44]], i64* getelementptr inbounds ([6 x i64], [6 x i64]* @.offload_sizes.11, i32 0, i32 0), i64* getelementptr inbounds ([6 x i64], [6 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 1, i32 [[TMP46]]) +// CHECK8-NEXT: [[TMP48:%.*]] = icmp ne i32 [[TMP47]], 0 +// CHECK8-NEXT: br i1 [[TMP48]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK8: omp_offload.failed: // CHECK8-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l214(%struct.S1* [[THIS1]], i32 [[TMP6]], i32 2, i32 [[TMP1]], i16* [[VLA]], i32 [[TMP8]]) #[[ATTR4]] // CHECK8-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -12660,15 +12404,15 @@ // CHECK8-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l214(%struct.S1* [[THIS1]], i32 [[TMP6]], i32 2, i32 [[TMP1]], i16* [[VLA]], i32 [[TMP8]]) #[[ATTR4]] // CHECK8-NEXT: br label [[OMP_IF_END]] // CHECK8: omp_if.end: -// CHECK8-NEXT: [[TMP56:%.*]] = mul nsw i32 1, [[TMP1]] -// CHECK8-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP56]] +// CHECK8-NEXT: [[TMP49:%.*]] = mul nsw i32 1, [[TMP1]] +// CHECK8-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP49]] // CHECK8-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1 -// CHECK8-NEXT: [[TMP57:%.*]] = load i16, i16* [[ARRAYIDX5]], align 2 -// CHECK8-NEXT: [[CONV6:%.*]] = sext i16 [[TMP57]] to i32 -// CHECK8-NEXT: [[TMP58:%.*]] = load i32, i32* [[B]], align 4 -// CHECK8-NEXT: [[ADD7:%.*]] = add nsw i32 [[CONV6]], [[TMP58]] -// CHECK8-NEXT: [[TMP59:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK8-NEXT: call void @llvm.stackrestore(i8* [[TMP59]]) +// CHECK8-NEXT: [[TMP50:%.*]] = load i16, i16* [[ARRAYIDX5]], align 2 +// CHECK8-NEXT: [[CONV6:%.*]] = sext i16 [[TMP50]] to i32 +// CHECK8-NEXT: [[TMP51:%.*]] = load i32, i32* [[B]], align 4 +// CHECK8-NEXT: [[ADD7:%.*]] = add nsw i32 [[CONV6]], [[TMP51]] +// CHECK8-NEXT: [[TMP52:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK8-NEXT: call void @llvm.stackrestore(i8* [[TMP52]]) // CHECK8-NEXT: ret i32 [[ADD7]] // // @@ -12739,7 +12483,7 @@ // CHECK8-NEXT: store i8* null, i8** [[TMP26]], align 4 // CHECK8-NEXT: [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK8-NEXT: [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], 
i32 0, i32 0 -// CHECK8-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) +// CHECK8-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l195.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.14, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.15, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) // CHECK8-NEXT: [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0 // CHECK8-NEXT: br i1 [[TMP30]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK8: omp_offload.failed: @@ -12807,7 +12551,7 @@ // CHECK8-NEXT: store i8* null, i8** [[TMP19]], align 4 // CHECK8-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK8-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK8-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.15, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) +// CHECK8-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l178.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.17, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.18, i32 0, i32 0), i8** null, i8** null, i32 1, i32 0) // CHECK8-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0 // CHECK8-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK8: omp_offload.failed: @@ -12861,20 +12605,20 @@ // CHECK8-NEXT: [[TOBOOL4:%.*]] = trunc i8 [[TMP9]] to i1 // CHECK8-NEXT: br i1 [[TOBOOL4]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_ELSE:%.*]] // CHECK8: omp_if.then: -// CHECK8-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*, i32)* @.omp_outlined..9 to void (i32*, i32*, ...)*), %struct.S1* [[TMP1]], i32 [[TMP6]], i32 [[TMP2]], i32 [[TMP3]], i16* [[TMP4]], i32 [[TMP8]]) +// CHECK8-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*, i32)* @.omp_outlined..10 to void (i32*, i32*, ...)*), %struct.S1* [[TMP1]], i32 [[TMP6]], i32 [[TMP2]], i32 [[TMP3]], i16* [[TMP4]], i32 [[TMP8]]) // CHECK8-NEXT: br label [[OMP_IF_END:%.*]] // CHECK8: omp_if.else: // CHECK8-NEXT: call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]]) // CHECK8-NEXT: store i32 [[TMP0]], i32* [[DOTTHREADID_TEMP_]], align 4 // CHECK8-NEXT: store i32 0, i32* [[DOTBOUND_ZERO_ADDR]], align 4 -// CHECK8-NEXT: call void @.omp_outlined..9(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTBOUND_ZERO_ADDR]], %struct.S1* [[TMP1]], i32 [[TMP6]], i32 [[TMP2]], i32 [[TMP3]], i16* [[TMP4]], i32 [[TMP8]]) #[[ATTR4]] +// CHECK8-NEXT: call void @.omp_outlined..10(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTBOUND_ZERO_ADDR]], %struct.S1* [[TMP1]], i32 [[TMP6]], i32 [[TMP2]], i32 [[TMP3]], i16* [[TMP4]], i32 [[TMP8]]) #[[ATTR4]] // CHECK8-NEXT: call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB2]], i32 [[TMP0]]) // CHECK8-NEXT: br label [[OMP_IF_END]] // CHECK8: omp_if.end: // CHECK8-NEXT: ret void // // -// CHECK8-LABEL: define {{[^@]+}}@.omp_outlined..9 +// CHECK8-LABEL: define {{[^@]+}}@.omp_outlined..10 // CHECK8-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i32 noundef [[B:%.*]], i32 noundef [[VLA:%.*]], i32 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] { // CHECK8-NEXT: entry: // CHECK8-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -13060,11 +12804,11 @@ // CHECK8-NEXT: [[CONV3:%.*]] = bitcast i32* [[AAA_CASTED]] to i8* // CHECK8-NEXT: store i8 [[TMP5]], i8* [[CONV3]], align 1 // CHECK8-NEXT: [[TMP6:%.*]] = load i32, i32* [[AAA_CASTED]], align 4 -// CHECK8-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, [10 x i32]*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], [10 x i32]* [[TMP0]]) +// CHECK8-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, [10 x i32]*)* @.omp_outlined..13 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], [10 x i32]* [[TMP0]]) // CHECK8-NEXT: ret void // // -// CHECK8-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK8-LABEL: define {{[^@]+}}@.omp_outlined..13 // CHECK8-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], i32 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] { // CHECK8-NEXT: entry: // CHECK8-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -13107,11 +12851,11 @@ // CHECK8-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16* // CHECK8-NEXT: store i16 [[TMP3]], i16* [[CONV1]], align 2 // CHECK8-NEXT: [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4 -// CHECK8-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]]) +// CHECK8-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]]) // CHECK8-NEXT: ret void // // -// CHECK8-LABEL: define {{[^@]+}}@.omp_outlined..14 +// CHECK8-LABEL: define {{[^@]+}}@.omp_outlined..16 // CHECK8-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] { // CHECK8-NEXT: entry: // CHECK8-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 diff --git a/clang/test/OpenMP/target_simd_codegen.cpp b/clang/test/OpenMP/target_simd_codegen.cpp --- a/clang/test/OpenMP/target_simd_codegen.cpp +++ b/clang/test/OpenMP/target_simd_codegen.cpp @@ -83,11 +83,14 @@ // CHECK-DAG: [[MAPT2:@.+]] = private unnamed_addr constant [3 x i64] [i64 800, i64 800, i64 800] // CHECK-DAG: [[SIZET3:@.+]] = private unnamed_addr constant [2 x i64] [i64 4, i64 2] // CHECK-DAG: [[MAPT3:@.+]] = private unnamed_addr constant [2 x i64] [i64 800, i64 800] +// CHECK-DAG: [[SIZET4:@.+]] = private unnamed_addr global [9 x i64] [i64 4, i64 40, i64 {{8|4}}, i64 0, i64 400, i64 {{8|4}}, i64 {{8|4}}, i64 0, i64 {{16|12}}] // CHECK-DAG: [[MAPT4:@.+]] = private unnamed_addr constant [9 x i64] [i64 800, i64 547, i64 800, i64 547, i64 547, i64 800, i64 800, i64 547, i64 547] // CHECK-DAG: [[SIZET5:@.+]] = private unnamed_addr constant [3 x i64] [i64 4, i64 2, i64 40] // CHECK-DAG: [[MAPT5:@.+]] = private unnamed_addr constant [3 x i64] [i64 800, i64 800, i64 547] // CHECK-DAG: [[SIZET6:@.+]] = private unnamed_addr constant [4 x i64] [i64 4, i64 2, i64 1, i64 40] // CHECK-DAG: [[MAPT6:@.+]] = private unnamed_addr constant [4 x i64] [i64 800, i64 800, i64 800, i64 547] +// OMP45-DAG: [[SIZET7:@.+]] = private unnamed_addr global [5 x i64] [i64 {{8|4}}, i64 4, i64 {{8|4}}, i64 {{8|4}}, i64 0] +// OMP50-DAG: [[SIZET7:@.+]] = private unnamed_addr global [6 x i64] [i64 {{8|4}}, i64 4, i64 {{8|4}}, i64 {{8|4}}, i64 0, i64 1] // OMP45-DAG: [[MAPT7:@.+]] = private unnamed_addr constant [5 x i64] [i64 547, i64 800, i64 800, i64 800, i64 547] // OMP50-DAG: [[MAPT7:@.+]] = private unnamed_addr constant [6 x i64] [i64 547, i64 800, i64 800, i64 800, i64 547, i64 800] // CHECK-DAG: @{{.*}} = weak constant i8 0 @@ -237,37 +240,27 @@ // CHECK-32: [[CNSZSIZE:%.+]] = mul nuw i32 [[CNELEMSIZE2]], 8 // CHECK-32: [[CNSIZE:%.+]] = sext i32 [[CNSZSIZE]] to i64 - // CHECK-DAG: [[RET:%.+]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @{{.+}}, i64 -1, i8* @{{[^,]+}}, i32 9, i8** [[BPR:%[^,]+]], i8** [[PR:%[^,]+]], i64* [[SR:%[^,]+]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* [[MAPT4]], i32 0, i32 0), i8** null, i8** null, i32 1, i32 1) + // CHECK-DAG: [[RET:%.+]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @{{.+}}, i64 -1, i8* @{{[^,]+}}, i32 9, i8** [[BPR:%[^,]+]], i8** [[PR:%[^,]+]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* [[SIZET4]], i32 0, i32 0), i64* getelementptr inbounds ([9 x i64], [9 x i64]* [[MAPT4]], i32 0, i32 0), i8** null, i8** null, i32 1, i32 1) // CHECK-DAG: [[BPR]] 
= getelementptr inbounds [9 x i8*], [9 x i8*]* [[BP:%[^,]+]], i32 0, i32 0 // CHECK-DAG: [[PR]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[P:%[^,]+]], i32 0, i32 0 - // CHECK-DAG: [[SR]] = getelementptr inbounds [9 x i64], [9 x i64]* [[S:%[^,]+]], i32 0, i32 0 - // CHECK-DAG: [[SADDR0:%.+]] = getelementptr inbounds [9 x i64], [9 x i64]* [[S]], i32 0, i32 [[IDX0:[0-9]+]] - // CHECK-DAG: [[BPADDR0:%.+]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[BP]], i32 0, i32 [[IDX0]] + // CHECK-DAG: [[BPADDR0:%.+]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[BP]], i32 0, i32 [[IDX0:[0-9]+]] // CHECK-DAG: [[PADDR0:%.+]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[P]], i32 0, i32 [[IDX0]] - // CHECK-DAG: [[SADDR1:%.+]] = getelementptr inbounds [9 x i64], [9 x i64]* [[S]], i32 0, i32 [[IDX1:[0-9]+]] - // CHECK-DAG: [[BPADDR1:%.+]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[BP]], i32 0, i32 [[IDX1]] + // CHECK-DAG: [[BPADDR1:%.+]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[BP]], i32 0, i32 [[IDX1:[0-9]+]] // CHECK-DAG: [[PADDR1:%.+]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[P]], i32 0, i32 [[IDX1]] - // CHECK-DAG: [[SADDR2:%.+]] = getelementptr inbounds [9 x i64], [9 x i64]* [[S]], i32 0, i32 [[IDX2:[0-9]+]] - // CHECK-DAG: [[BPADDR2:%.+]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[BP]], i32 0, i32 [[IDX2]] + // CHECK-DAG: [[BPADDR2:%.+]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[BP]], i32 0, i32 [[IDX2:[0-9]+]] // CHECK-DAG: [[PADDR2:%.+]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[P]], i32 0, i32 [[IDX2]] - // CHECK-DAG: [[SADDR3:%.+]] = getelementptr inbounds [9 x i64], [9 x i64]* [[S]], i32 0, i32 [[IDX3:[0-9]+]] - // CHECK-DAG: [[BPADDR3:%.+]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[BP]], i32 0, i32 [[IDX3]] + // CHECK-DAG: [[BPADDR3:%.+]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[BP]], i32 0, i32 [[IDX3:[0-9]+]] // CHECK-DAG: [[PADDR3:%.+]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[P]], i32 0, i32 [[IDX3]] - // CHECK-DAG: [[SADDR4:%.+]] = getelementptr inbounds [9 x i64], [9 x i64]* [[S]], i32 0, i32 [[IDX4:[0-9]+]] - // CHECK-DAG: [[BPADDR4:%.+]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[BP]], i32 0, i32 [[IDX4]] + // CHECK-DAG: [[BPADDR4:%.+]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[BP]], i32 0, i32 [[IDX4:[0-9]+]] // CHECK-DAG: [[PADDR4:%.+]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[P]], i32 0, i32 [[IDX4]] - // CHECK-DAG: [[SADDR5:%.+]] = getelementptr inbounds [9 x i64], [9 x i64]* [[S]], i32 0, i32 [[IDX5:[0-9]+]] - // CHECK-DAG: [[BPADDR5:%.+]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[BP]], i32 0, i32 [[IDX5]] + // CHECK-DAG: [[BPADDR5:%.+]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[BP]], i32 0, i32 [[IDX5:[0-9]+]] // CHECK-DAG: [[PADDR5:%.+]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[P]], i32 0, i32 [[IDX5]] - // CHECK-DAG: [[SADDR6:%.+]] = getelementptr inbounds [9 x i64], [9 x i64]* [[S]], i32 0, i32 [[IDX6:[0-9]+]] - // CHECK-DAG: [[BPADDR6:%.+]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[BP]], i32 0, i32 [[IDX6]] + // CHECK-DAG: [[BPADDR6:%.+]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[BP]], i32 0, i32 [[IDX6:[0-9]+]] // CHECK-DAG: [[PADDR6:%.+]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[P]], i32 0, i32 [[IDX6]] - // CHECK-DAG: [[SADDR7:%.+]] = getelementptr inbounds [9 x i64], [9 x i64]* [[S]], i32 0, i32 [[IDX7:[0-9]+]] - // CHECK-DAG: [[BPADDR7:%.+]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[BP]], i32 0, i32 [[IDX7]] + // CHECK-DAG: 
[[BPADDR7:%.+]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[BP]], i32 0, i32 [[IDX7:[0-9]+]] // CHECK-DAG: [[PADDR7:%.+]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[P]], i32 0, i32 [[IDX7]] - // CHECK-DAG: [[SADDR8:%.+]] = getelementptr inbounds [9 x i64], [9 x i64]* [[S]], i32 0, i32 [[IDX8:[0-9]+]] - // CHECK-DAG: [[BPADDR8:%.+]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[BP]], i32 0, i32 [[IDX8]] + // CHECK-DAG: [[BPADDR8:%.+]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[BP]], i32 0, i32 [[IDX8:[0-9]+]] // CHECK-DAG: [[PADDR8:%.+]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[P]], i32 0, i32 [[IDX8]] // The names below are not necessarily consistent with the names used for the @@ -276,55 +269,48 @@ // CHECK-DAG: store i[[SZ]] [[A_CVAL]], i[[SZ]]* [[CPADDR0:%.+]], // CHECK-DAG: [[CBPADDR0]] = bitcast i8** {{%[^,]+}} to i[[SZ]]* // CHECK-DAG: [[CPADDR0]] = bitcast i8** {{%[^,]+}} to i[[SZ]]* - // CHECK-DAG: store i64 4, i64* {{%[^,]+}} // CHECK-DAG: store [10 x float]* %{{.+}}, [10 x float]** [[CBPADDR1:%.+]], // CHECK-DAG: store [10 x float]* %{{.+}}, [10 x float]** [[CPADDR1:%.+]], // CHECK-DAG: [[CBPADDR1]] = bitcast i8** {{%[^,]+}} to [10 x float]** // CHECK-DAG: [[CPADDR1]] = bitcast i8** {{%[^,]+}} to [10 x float]** - // CHECK-DAG: store i64 40, i64* {{%[^,]+}} // CHECK-DAG: store i[[SZ]] %{{.+}}, i[[SZ]]* [[CBPADDR2:%.+]], // CHECK-DAG: store i[[SZ]] %{{.+}}, i[[SZ]]* [[CPADDR2:%.+]], // CHECK-DAG: [[CBPADDR2]] = bitcast i8** {{%[^,]+}} to i[[SZ]]* // CHECK-DAG: [[CPADDR2]] = bitcast i8** {{%[^,]+}} to i[[SZ]]* - // CHECK-DAG: store i64 {{4|8}}, i64* {{%[^,]+}} // CHECK-DAG: store float* %{{.+}}, float** [[CBPADDR3:%.+]], // CHECK-DAG: store float* %{{.+}}, float** [[CPADDR3:%.+]], // CHECK-DAG: [[CBPADDR3]] = bitcast i8** {{%[^,]+}} to float** // CHECK-DAG: [[CPADDR3]] = bitcast i8** {{%[^,]+}} to float** - // CHECK-DAG: store i64 [[BNSIZE]], i64* {{%[^,]+}} + // CHECK-DAG: store i64 [[BNSIZE]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* [[SIZET4]], i32 0, i32 3), // CHECK-DAG: store [5 x [10 x double]]* %{{.+}}, [5 x [10 x double]]** [[CBPADDR4:%.+]], // CHECK-DAG: store [5 x [10 x double]]* %{{.+}}, [5 x [10 x double]]** [[CPADDR4:%.+]], // CHECK-DAG: [[CBPADDR4]] = bitcast i8** {{%[^,]+}} to [5 x [10 x double]]** // CHECK-DAG: [[CPADDR4]] = bitcast i8** {{%[^,]+}} to [5 x [10 x double]]** - // CHECK-DAG: store i64 400, i64* {{%[^,]+}} // CHECK-DAG: store i[[SZ]] 5, i[[SZ]]* [[CBPADDR5:%.+]], // CHECK-DAG: store i[[SZ]] 5, i[[SZ]]* [[CPADDR5:%.+]], // CHECK-DAG: [[CBPADDR5]] = bitcast i8** {{%[^,]+}} to i[[SZ]]* // CHECK-DAG: [[CPADDR5]] = bitcast i8** {{%[^,]+}} to i[[SZ]]* - // CHECK-DAG: store i64 {{4|8}}, i64* {{%[^,]+}} // CHECK-DAG: store i[[SZ]] %{{.+}}, i[[SZ]]* [[CBPADDR6:%.+]], // CHECK-DAG: store i[[SZ]] %{{.+}}, i[[SZ]]* [[CPADDR6:%.+]], // CHECK-DAG: [[CBPADDR6]] = bitcast i8** {{%[^,]+}} to i[[SZ]]* // CHECK-DAG: [[CPADDR6]] = bitcast i8** {{%[^,]+}} to i[[SZ]]* - // CHECK-DAG: store i64 {{4|8}}, i64* {{%[^,]+}} // CHECK-DAG: store double* %{{.+}}, double** [[CBPADDR7:%.+]], // CHECK-DAG: store double* %{{.+}}, double** [[CPADDR7:%.+]], // CHECK-DAG: [[CBPADDR7]] = bitcast i8** {{%[^,]+}} to double** // CHECK-DAG: [[CPADDR7]] = bitcast i8** {{%[^,]+}} to double** - // CHECK-DAG: store i64 [[CNSIZE]], i64* {{%[^,]+}} + // CHECK-DAG: store i64 [[CNSIZE]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* [[SIZET4]], i32 0, i32 7), // CHECK-DAG: store [[TT]]* %{{.+}}, [[TT]]** [[CBPADDR8:%.+]], // CHECK-DAG: store [[TT]]* %{{.+}}, 
[[TT]]** [[CPADDR8:%.+]], // CHECK-DAG: [[CBPADDR8]] = bitcast i8** {{%[^,]+}} to [[TT]]** // CHECK-DAG: [[CPADDR8]] = bitcast i8** {{%[^,]+}} to [[TT]]** - // CHECK-DAG: store i64 {{12|16}}, i64* {{%[^,]+}} // CHECK-NEXT: [[ERROR:%.+]] = icmp ne i32 [[RET]], 0 // CHECK-NEXT: br i1 [[ERROR]], label %[[FAIL:[^,]+]], label %[[END:[^,]+]] @@ -532,46 +518,33 @@ // CHECK-32: [[CSZSIZE:%.+]] = mul nuw i32 [[CELEMSIZE2]], 2 // CHECK-32: [[CSIZE:%.+]] = sext i32 [[CSZSIZE]] to i64 -// OMP45-DAG: [[RET:%.+]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @{{.+}}, i64 -1, i8* @{{[^,]+}}, i32 5, i8** [[BPR:%[^,]+]], i8** [[PR:%[^,]+]], i64* [[SR:%[^,]+]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* [[MAPT7]], i32 0, i32 0), i8** null, i8** null, i32 1, i32 1) -// OMP50-DAG: [[RET:%.+]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @{{.+}}, i64 -1, i8* @{{[^,]+}}, i32 6, i8** [[BPR:%[^,]+]], i8** [[PR:%[^,]+]], i64* [[SR:%[^,]+]], i64* getelementptr inbounds ([6 x i64], [6 x i64]* [[MAPT7]], i32 0, i32 0), i8** null, i8** null, i32 1, i32 1) +// OMP45-DAG: [[RET:%.+]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @{{.+}}, i64 -1, i8* @{{[^,]+}}, i32 5, i8** [[BPR:%[^,]+]], i8** [[PR:%[^,]+]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* [[SIZET7]], i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* [[MAPT7]], i32 0, i32 0), i8** null, i8** null, i32 1, i32 1) +// OMP50-DAG: [[RET:%.+]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @{{.+}}, i64 -1, i8* @{{[^,]+}}, i32 6, i8** [[BPR:%[^,]+]], i8** [[PR:%[^,]+]], i64* getelementptr inbounds ([6 x i64], [6 x i64]* [[SIZET7]], i32 0, i32 0), i64* getelementptr inbounds ([6 x i64], [6 x i64]* [[MAPT7]], i32 0, i32 0), i8** null, i8** null, i32 1, i32 1) // OMP45-DAG: [[BPR]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[BP:%.+]], i32 0, i32 0 // OMP45-DAG: [[PR]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[P:%.+]], i32 0, i32 0 -// OMP45-DAG: [[SR]] = getelementptr inbounds [5 x i64], [5 x i64]* [[S:%.+]], i32 0, i32 0 -// OMP45-DAG: [[SADDR0:%.+]] = getelementptr inbounds [5 x i64], [5 x i64]* [[S]], i32 [[IDX0:[0-9]+]] -// OMP45-DAG: [[BPADDR0:%.+]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[BP]], i32 [[IDX0]] +// OMP45-DAG: [[BPADDR0:%.+]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[BP]], i32 [[IDX0:[0-9]+]] // OMP45-DAG: [[PADDR0:%.+]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[P]], i32 [[IDX0]] -// OMP45-DAG: [[SADDR1:%.+]] = getelementptr inbounds [5 x i64], [5 x i64]* [[S]], i32 [[IDX1:[0-9]+]] -// OMP45-DAG: [[BPADDR1:%.+]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[BP]], i32 [[IDX1]] +// OMP45-DAG: [[BPADDR1:%.+]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[BP]], i32 [[IDX1:[0-9]+]] // OMP45-DAG: [[PADDR1:%.+]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[P]], i32 [[IDX1]] -// OMP45-DAG: [[SADDR2:%.+]] = getelementptr inbounds [5 x i64], [5 x i64]* [[S]], i32 [[IDX2:[0-9]+]] -// OMP45-DAG: [[BPADDR2:%.+]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[BP]], i32 [[IDX2]] +// OMP45-DAG: [[BPADDR2:%.+]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[BP]], i32 [[IDX2:[0-9]+]] // OMP45-DAG: [[PADDR2:%.+]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[P]], i32 [[IDX2]] -// OMP45-DAG: [[SADDR3:%.+]] = getelementptr inbounds [5 x i64], [5 x i64]* [[S]], i32 [[IDX3:[0-9]+]] -// OMP45-DAG: [[BPADDR3:%.+]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[BP]], i32 [[IDX3]] +// OMP45-DAG: [[BPADDR3:%.+]] = getelementptr inbounds [5 x i8*], [5 x i8*]* 
[[BP]], i32 [[IDX3:[0-9]+]] // OMP45-DAG: [[PADDR3:%.+]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[P]], i32 [[IDX3]] -// OMP45-DAG: [[SADDR4:%.+]] = getelementptr inbounds [5 x i64], [5 x i64]* [[S]], i32 [[IDX4:[0-9]+]] -// OMP45-DAG: [[BPADDR4:%.+]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[BP]], i32 [[IDX4]] +// OMP45-DAG: [[BPADDR4:%.+]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[BP]], i32 [[IDX4:[0-9]+]] // OMP45-DAG: [[PADDR4:%.+]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[P]], i32 [[IDX4]] // OMP50-DAG: [[BPR]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[BP:%.+]], i32 0, i32 0 // OMP50-DAG: [[PR]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[P:%.+]], i32 0, i32 0 -// OMP50-DAG: [[SR]] = getelementptr inbounds [6 x i64], [6 x i64]* [[S:%.+]], i32 0, i32 0 -// OMP50-DAG: [[SADDR0:%.+]] = getelementptr inbounds [6 x i64], [6 x i64]* [[S]], i32 [[IDX0:[0-9]+]] -// OMP50-DAG: [[BPADDR0:%.+]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[BP]], i32 [[IDX0]] +// OMP50-DAG: [[BPADDR0:%.+]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[BP]], i32 [[IDX0:[0-9]+]] // OMP50-DAG: [[PADDR0:%.+]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[P]], i32 [[IDX0]] -// OMP50-DAG: [[SADDR1:%.+]] = getelementptr inbounds [6 x i64], [6 x i64]* [[S]], i32 [[IDX1:[0-9]+]] -// OMP50-DAG: [[BPADDR1:%.+]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[BP]], i32 [[IDX1]] +// OMP50-DAG: [[BPADDR1:%.+]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[BP]], i32 [[IDX1:[0-9]+]] // OMP50-DAG: [[PADDR1:%.+]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[P]], i32 [[IDX1]] -// OMP50-DAG: [[SADDR2:%.+]] = getelementptr inbounds [6 x i64], [6 x i64]* [[S]], i32 [[IDX2:[0-9]+]] -// OMP50-DAG: [[BPADDR2:%.+]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[BP]], i32 [[IDX2]] +// OMP50-DAG: [[BPADDR2:%.+]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[BP]], i32 [[IDX2:[0-9]+]] // OMP50-DAG: [[PADDR2:%.+]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[P]], i32 [[IDX2]] -// OMP50-DAG: [[SADDR3:%.+]] = getelementptr inbounds [6 x i64], [6 x i64]* [[S]], i32 [[IDX3:[0-9]+]] -// OMP50-DAG: [[BPADDR3:%.+]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[BP]], i32 [[IDX3]] +// OMP50-DAG: [[BPADDR3:%.+]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[BP]], i32 [[IDX3:[0-9]+]] // OMP50-DAG: [[PADDR3:%.+]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[P]], i32 [[IDX3]] -// OMP50-DAG: [[SADDR4:%.+]] = getelementptr inbounds [6 x i64], [6 x i64]* [[S]], i32 [[IDX4:[0-9]+]] -// OMP50-DAG: [[BPADDR4:%.+]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[BP]], i32 [[IDX4]] +// OMP50-DAG: [[BPADDR4:%.+]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[BP]], i32 [[IDX4:[0-9]+]] // OMP50-DAG: [[PADDR4:%.+]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[P]], i32 [[IDX4]] -// OMP50-DAG: [[SADDR5:%.+]] = getelementptr inbounds [6 x i64], [6 x i64]* [[S]], i32 [[IDX5:[0-9]+]] -// OMP50-DAG: [[BPADDR5:%.+]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[BP]], i32 [[IDX5]] +// OMP50-DAG: [[BPADDR5:%.+]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[BP]], i32 [[IDX5:[0-9]+]] // OMP50-DAG: [[PADDR5:%.+]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[P]], i32 [[IDX5]] // The names below are not necessarily consistent with the names used for the @@ -580,37 +553,32 @@ // CHECK-DAG: store [[S1]]* %{{.+}}, [[S1]]** [[CPADDR0:%.+]], // CHECK-DAG: [[CBPADDR0]] = bitcast i8** {{%[^,]+}} to [[S1]]** // CHECK-DAG: [[CPADDR0]] = bitcast i8** {{%[^,]+}} to [[S1]]** -// CHECK-DAG: store i64 
{{4|8}}, i64* {{%[^,]+}} // CHECK-DAG: store i[[SZ]] [[B_CVAL]], i[[SZ]]* [[CBPADDR1:%.+]], // CHECK-DAG: store i[[SZ]] [[B_CVAL]], i[[SZ]]* [[CPADDR1:%.+]], // CHECK-DAG: [[CBPADDR1]] = bitcast i8** {{%[^,]+}} to i[[SZ]]* // CHECK-DAG: [[CPADDR1]] = bitcast i8** {{%[^,]+}} to i[[SZ]]* -// CHECK-DAG: store i64 4, i64* {{%[^,]+}} // CHECK-DAG: store i[[SZ]] 2, i[[SZ]]* [[CBPADDR2:%.+]], // CHECK-DAG: store i[[SZ]] 2, i[[SZ]]* [[CPADDR2:%.+]], // CHECK-DAG: [[CBPADDR2]] = bitcast i8** {{%[^,]+}} to i[[SZ]]* // CHECK-DAG: [[CPADDR2]] = bitcast i8** {{%[^,]+}} to i[[SZ]]* -// CHECK-DAG: store i64 {{4|8}}, i64* {{%[^,]+}} // CHECK-DAG: store i[[SZ]] %{{.+}}, i[[SZ]]* [[CBPADDR3:%.+]], // CHECK-DAG: store i[[SZ]] %{{.+}}, i[[SZ]]* [[CPADDR3:%.+]], // CHECK-DAG: [[CBPADDR3]] = bitcast i8** {{%[^,]+}} to i[[SZ]]* // CHECK-DAG: [[CPADDR3]] = bitcast i8** {{%[^,]+}} to i[[SZ]]* -// CHECK-DAG: store i64 {{4|8}}, i64* {{%[^,]+}} // CHECK-DAG: store i16* %{{.+}}, i16** [[CBPADDR4:%.+]], // CHECK-DAG: store i16* %{{.+}}, i16** [[CPADDR4:%.+]], // CHECK-DAG: [[CBPADDR4]] = bitcast i8** {{%[^,]+}} to i16** // CHECK-DAG: [[CPADDR4]] = bitcast i8** {{%[^,]+}} to i16** -// CHECK-DAG: store i64 [[CSIZE]], i64* {{%[^,]+}} +// CHECK-DAG: store i64 [[CSIZE]], i64* getelementptr inbounds ([{{.+}} x i64], [{{.+}} x i64]* [[SIZET7]], i32 0, i32 4), // OMP50-DAG: store i[[SZ]] [[SIMD_COND]], i[[SZ]]* [[CBPADDR5:%.+]], // OMP50-DAG: store i[[SZ]] [[SIMD_COND]], i[[SZ]]* [[CPADDR5:%.+]], // OMP50-DAG: [[CBPADDR5]] = bitcast i8** {{%[^,]+}} to i[[SZ]]* // OMP50-DAG: [[CPADDR5]] = bitcast i8** {{%[^,]+}} to i[[SZ]]* -// OMP50-DAG: store i64 1, i64* {{%[^,]+}} // CHECK-NEXT: [[ERROR:%.+]] = icmp ne i32 [[RET]], 0 // CHECK-NEXT: br i1 [[ERROR]], label %[[FAIL:[^,]+]], label %[[END:[^,]+]] diff --git a/clang/test/OpenMP/target_teams_codegen.cpp b/clang/test/OpenMP/target_teams_codegen.cpp --- a/clang/test/OpenMP/target_teams_codegen.cpp +++ b/clang/test/OpenMP/target_teams_codegen.cpp @@ -337,7 +337,6 @@ // CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS25:%.*]] = alloca [9 x i8*], align 8 // CHECK1-NEXT: [[DOTOFFLOAD_PTRS26:%.*]] = alloca [9 x i8*], align 8 // CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS27:%.*]] = alloca [9 x i8*], align 8 -// CHECK1-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [9 x i64], align 8 // CHECK1-NEXT: [[NN:%.*]] = alloca i32, align 4 // CHECK1-NEXT: [[NN_CASTED:%.*]] = alloca i64, align 8 // CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS33:%.*]] = alloca [1 x i8*], align 8 @@ -522,96 +521,79 @@ // CHECK1-NEXT: [[TMP97:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS26]], i32 0, i32 0 // CHECK1-NEXT: [[TMP98:%.*]] = bitcast i8** [[TMP97]] to i64* // CHECK1-NEXT: store i64 [[TMP90]], i64* [[TMP98]], align 8 -// CHECK1-NEXT: [[TMP99:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK1-NEXT: store i64 4, i64* [[TMP99]], align 8 -// CHECK1-NEXT: [[TMP100:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS27]], i64 0, i64 0 -// CHECK1-NEXT: store i8* null, i8** [[TMP100]], align 8 -// CHECK1-NEXT: [[TMP101:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS25]], i32 0, i32 1 -// CHECK1-NEXT: [[TMP102:%.*]] = bitcast i8** [[TMP101]] to [10 x float]** -// CHECK1-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP102]], align 8 -// CHECK1-NEXT: [[TMP103:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS26]], i32 0, i32 1 -// CHECK1-NEXT: [[TMP104:%.*]] = bitcast i8** [[TMP103]] to [10 x float]** -// 
CHECK1-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP104]], align 8 -// CHECK1-NEXT: [[TMP105:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK1-NEXT: store i64 40, i64* [[TMP105]], align 8 -// CHECK1-NEXT: [[TMP106:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS27]], i64 0, i64 1 -// CHECK1-NEXT: store i8* null, i8** [[TMP106]], align 8 -// CHECK1-NEXT: [[TMP107:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS25]], i32 0, i32 2 +// CHECK1-NEXT: [[TMP99:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS27]], i64 0, i64 0 +// CHECK1-NEXT: store i8* null, i8** [[TMP99]], align 8 +// CHECK1-NEXT: [[TMP100:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS25]], i32 0, i32 1 +// CHECK1-NEXT: [[TMP101:%.*]] = bitcast i8** [[TMP100]] to [10 x float]** +// CHECK1-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP101]], align 8 +// CHECK1-NEXT: [[TMP102:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS26]], i32 0, i32 1 +// CHECK1-NEXT: [[TMP103:%.*]] = bitcast i8** [[TMP102]] to [10 x float]** +// CHECK1-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP103]], align 8 +// CHECK1-NEXT: [[TMP104:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS27]], i64 0, i64 1 +// CHECK1-NEXT: store i8* null, i8** [[TMP104]], align 8 +// CHECK1-NEXT: [[TMP105:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS25]], i32 0, i32 2 +// CHECK1-NEXT: [[TMP106:%.*]] = bitcast i8** [[TMP105]] to i64* +// CHECK1-NEXT: store i64 [[TMP2]], i64* [[TMP106]], align 8 +// CHECK1-NEXT: [[TMP107:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS26]], i32 0, i32 2 // CHECK1-NEXT: [[TMP108:%.*]] = bitcast i8** [[TMP107]] to i64* // CHECK1-NEXT: store i64 [[TMP2]], i64* [[TMP108]], align 8 -// CHECK1-NEXT: [[TMP109:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS26]], i32 0, i32 2 -// CHECK1-NEXT: [[TMP110:%.*]] = bitcast i8** [[TMP109]] to i64* -// CHECK1-NEXT: store i64 [[TMP2]], i64* [[TMP110]], align 8 -// CHECK1-NEXT: [[TMP111:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK1-NEXT: store i64 8, i64* [[TMP111]], align 8 -// CHECK1-NEXT: [[TMP112:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS27]], i64 0, i64 2 -// CHECK1-NEXT: store i8* null, i8** [[TMP112]], align 8 -// CHECK1-NEXT: [[TMP113:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS25]], i32 0, i32 3 -// CHECK1-NEXT: [[TMP114:%.*]] = bitcast i8** [[TMP113]] to float** -// CHECK1-NEXT: store float* [[VLA]], float** [[TMP114]], align 8 -// CHECK1-NEXT: [[TMP115:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS26]], i32 0, i32 3 -// CHECK1-NEXT: [[TMP116:%.*]] = bitcast i8** [[TMP115]] to float** -// CHECK1-NEXT: store float* [[VLA]], float** [[TMP116]], align 8 -// CHECK1-NEXT: [[TMP117:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK1-NEXT: store i64 [[TMP92]], i64* [[TMP117]], align 8 -// CHECK1-NEXT: [[TMP118:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS27]], i64 0, i64 3 -// CHECK1-NEXT: store i8* null, i8** [[TMP118]], align 8 -// CHECK1-NEXT: [[TMP119:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS25]], i32 0, i32 4 -// CHECK1-NEXT: [[TMP120:%.*]] = bitcast i8** [[TMP119]] to [5 x 
[10 x double]]** -// CHECK1-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP120]], align 8 -// CHECK1-NEXT: [[TMP121:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS26]], i32 0, i32 4 -// CHECK1-NEXT: [[TMP122:%.*]] = bitcast i8** [[TMP121]] to [5 x [10 x double]]** -// CHECK1-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP122]], align 8 -// CHECK1-NEXT: [[TMP123:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK1-NEXT: store i64 400, i64* [[TMP123]], align 8 -// CHECK1-NEXT: [[TMP124:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS27]], i64 0, i64 4 +// CHECK1-NEXT: [[TMP109:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS27]], i64 0, i64 2 +// CHECK1-NEXT: store i8* null, i8** [[TMP109]], align 8 +// CHECK1-NEXT: [[TMP110:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS25]], i32 0, i32 3 +// CHECK1-NEXT: [[TMP111:%.*]] = bitcast i8** [[TMP110]] to float** +// CHECK1-NEXT: store float* [[VLA]], float** [[TMP111]], align 8 +// CHECK1-NEXT: [[TMP112:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS26]], i32 0, i32 3 +// CHECK1-NEXT: [[TMP113:%.*]] = bitcast i8** [[TMP112]] to float** +// CHECK1-NEXT: store float* [[VLA]], float** [[TMP113]], align 8 +// CHECK1-NEXT: store i64 [[TMP92]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_sizes.10, i32 0, i32 3), align 8 +// CHECK1-NEXT: [[TMP114:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS27]], i64 0, i64 3 +// CHECK1-NEXT: store i8* null, i8** [[TMP114]], align 8 +// CHECK1-NEXT: [[TMP115:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS25]], i32 0, i32 4 +// CHECK1-NEXT: [[TMP116:%.*]] = bitcast i8** [[TMP115]] to [5 x [10 x double]]** +// CHECK1-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP116]], align 8 +// CHECK1-NEXT: [[TMP117:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS26]], i32 0, i32 4 +// CHECK1-NEXT: [[TMP118:%.*]] = bitcast i8** [[TMP117]] to [5 x [10 x double]]** +// CHECK1-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP118]], align 8 +// CHECK1-NEXT: [[TMP119:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS27]], i64 0, i64 4 +// CHECK1-NEXT: store i8* null, i8** [[TMP119]], align 8 +// CHECK1-NEXT: [[TMP120:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS25]], i32 0, i32 5 +// CHECK1-NEXT: [[TMP121:%.*]] = bitcast i8** [[TMP120]] to i64* +// CHECK1-NEXT: store i64 5, i64* [[TMP121]], align 8 +// CHECK1-NEXT: [[TMP122:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS26]], i32 0, i32 5 +// CHECK1-NEXT: [[TMP123:%.*]] = bitcast i8** [[TMP122]] to i64* +// CHECK1-NEXT: store i64 5, i64* [[TMP123]], align 8 +// CHECK1-NEXT: [[TMP124:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS27]], i64 0, i64 5 // CHECK1-NEXT: store i8* null, i8** [[TMP124]], align 8 -// CHECK1-NEXT: [[TMP125:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS25]], i32 0, i32 5 +// CHECK1-NEXT: [[TMP125:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS25]], i32 0, i32 6 // CHECK1-NEXT: [[TMP126:%.*]] = bitcast i8** [[TMP125]] to i64* -// CHECK1-NEXT: store i64 5, i64* [[TMP126]], align 8 -// CHECK1-NEXT: [[TMP127:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS26]], 
i32 0, i32 5 +// CHECK1-NEXT: store i64 [[TMP5]], i64* [[TMP126]], align 8 +// CHECK1-NEXT: [[TMP127:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS26]], i32 0, i32 6 // CHECK1-NEXT: [[TMP128:%.*]] = bitcast i8** [[TMP127]] to i64* -// CHECK1-NEXT: store i64 5, i64* [[TMP128]], align 8 -// CHECK1-NEXT: [[TMP129:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5 -// CHECK1-NEXT: store i64 8, i64* [[TMP129]], align 8 -// CHECK1-NEXT: [[TMP130:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS27]], i64 0, i64 5 -// CHECK1-NEXT: store i8* null, i8** [[TMP130]], align 8 -// CHECK1-NEXT: [[TMP131:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS25]], i32 0, i32 6 -// CHECK1-NEXT: [[TMP132:%.*]] = bitcast i8** [[TMP131]] to i64* -// CHECK1-NEXT: store i64 [[TMP5]], i64* [[TMP132]], align 8 -// CHECK1-NEXT: [[TMP133:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS26]], i32 0, i32 6 -// CHECK1-NEXT: [[TMP134:%.*]] = bitcast i8** [[TMP133]] to i64* -// CHECK1-NEXT: store i64 [[TMP5]], i64* [[TMP134]], align 8 -// CHECK1-NEXT: [[TMP135:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6 -// CHECK1-NEXT: store i64 8, i64* [[TMP135]], align 8 -// CHECK1-NEXT: [[TMP136:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS27]], i64 0, i64 6 -// CHECK1-NEXT: store i8* null, i8** [[TMP136]], align 8 -// CHECK1-NEXT: [[TMP137:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS25]], i32 0, i32 7 -// CHECK1-NEXT: [[TMP138:%.*]] = bitcast i8** [[TMP137]] to double** -// CHECK1-NEXT: store double* [[VLA1]], double** [[TMP138]], align 8 -// CHECK1-NEXT: [[TMP139:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS26]], i32 0, i32 7 -// CHECK1-NEXT: [[TMP140:%.*]] = bitcast i8** [[TMP139]] to double** -// CHECK1-NEXT: store double* [[VLA1]], double** [[TMP140]], align 8 -// CHECK1-NEXT: [[TMP141:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7 -// CHECK1-NEXT: store i64 [[TMP94]], i64* [[TMP141]], align 8 -// CHECK1-NEXT: [[TMP142:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS27]], i64 0, i64 7 -// CHECK1-NEXT: store i8* null, i8** [[TMP142]], align 8 -// CHECK1-NEXT: [[TMP143:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS25]], i32 0, i32 8 -// CHECK1-NEXT: [[TMP144:%.*]] = bitcast i8** [[TMP143]] to %struct.TT** -// CHECK1-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP144]], align 8 -// CHECK1-NEXT: [[TMP145:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS26]], i32 0, i32 8 -// CHECK1-NEXT: [[TMP146:%.*]] = bitcast i8** [[TMP145]] to %struct.TT** -// CHECK1-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP146]], align 8 -// CHECK1-NEXT: [[TMP147:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 8 -// CHECK1-NEXT: store i64 16, i64* [[TMP147]], align 8 -// CHECK1-NEXT: [[TMP148:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS27]], i64 0, i64 8 -// CHECK1-NEXT: store i8* null, i8** [[TMP148]], align 8 -// CHECK1-NEXT: [[TMP149:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS25]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP150:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS26]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP151:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* 
[[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP152:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142.region_id, i32 9, i8** [[TMP149]], i8** [[TMP150]], i64* [[TMP151]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK1-NEXT: [[TMP153:%.*]] = icmp ne i32 [[TMP152]], 0 -// CHECK1-NEXT: br i1 [[TMP153]], label [[OMP_OFFLOAD_FAILED28:%.*]], label [[OMP_OFFLOAD_CONT29:%.*]] +// CHECK1-NEXT: store i64 [[TMP5]], i64* [[TMP128]], align 8 +// CHECK1-NEXT: [[TMP129:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS27]], i64 0, i64 6 +// CHECK1-NEXT: store i8* null, i8** [[TMP129]], align 8 +// CHECK1-NEXT: [[TMP130:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS25]], i32 0, i32 7 +// CHECK1-NEXT: [[TMP131:%.*]] = bitcast i8** [[TMP130]] to double** +// CHECK1-NEXT: store double* [[VLA1]], double** [[TMP131]], align 8 +// CHECK1-NEXT: [[TMP132:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS26]], i32 0, i32 7 +// CHECK1-NEXT: [[TMP133:%.*]] = bitcast i8** [[TMP132]] to double** +// CHECK1-NEXT: store double* [[VLA1]], double** [[TMP133]], align 8 +// CHECK1-NEXT: store i64 [[TMP94]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_sizes.10, i32 0, i32 7), align 8 +// CHECK1-NEXT: [[TMP134:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS27]], i64 0, i64 7 +// CHECK1-NEXT: store i8* null, i8** [[TMP134]], align 8 +// CHECK1-NEXT: [[TMP135:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS25]], i32 0, i32 8 +// CHECK1-NEXT: [[TMP136:%.*]] = bitcast i8** [[TMP135]] to %struct.TT** +// CHECK1-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP136]], align 8 +// CHECK1-NEXT: [[TMP137:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS26]], i32 0, i32 8 +// CHECK1-NEXT: [[TMP138:%.*]] = bitcast i8** [[TMP137]] to %struct.TT** +// CHECK1-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP138]], align 8 +// CHECK1-NEXT: [[TMP139:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS27]], i64 0, i64 8 +// CHECK1-NEXT: store i8* null, i8** [[TMP139]], align 8 +// CHECK1-NEXT: [[TMP140:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS25]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP141:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS26]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP142:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142.region_id, i32 9, i8** [[TMP140]], i8** [[TMP141]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_sizes.10, i32 0, i32 0), i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_maptypes.11, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK1-NEXT: [[TMP143:%.*]] = icmp ne i32 [[TMP142]], 0 +// CHECK1-NEXT: br i1 [[TMP143]], label [[OMP_OFFLOAD_FAILED28:%.*]], label [[OMP_OFFLOAD_CONT29:%.*]] // CHECK1: omp_offload.failed28: // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142(i64 [[TMP90]], [10 x float]* [[B]], i64 [[TMP2]], float* [[VLA]], [5 x [10 x double]]* [[C]], i64 5, i64 [[TMP5]], double* [[VLA1]], %struct.TT* [[D]]) #[[ATTR3]] // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT29]] @@ -622,52 +604,52 @@ // CHECK1-NEXT: br label [[OMP_IF_END31]] // 
CHECK1: omp_if.end31: // CHECK1-NEXT: store i32 0, i32* [[NN]], align 4 -// CHECK1-NEXT: [[TMP154:%.*]] = load i32, i32* [[NN]], align 4 +// CHECK1-NEXT: [[TMP144:%.*]] = load i32, i32* [[NN]], align 4 // CHECK1-NEXT: [[CONV32:%.*]] = bitcast i64* [[NN_CASTED]] to i32* -// CHECK1-NEXT: store i32 [[TMP154]], i32* [[CONV32]], align 4 -// CHECK1-NEXT: [[TMP155:%.*]] = load i64, i64* [[NN_CASTED]], align 8 -// CHECK1-NEXT: [[TMP156:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP157:%.*]] = bitcast i8** [[TMP156]] to i64* -// CHECK1-NEXT: store i64 [[TMP155]], i64* [[TMP157]], align 8 -// CHECK1-NEXT: [[TMP158:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP159:%.*]] = bitcast i8** [[TMP158]] to i64* -// CHECK1-NEXT: store i64 [[TMP155]], i64* [[TMP159]], align 8 -// CHECK1-NEXT: [[TMP160:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS35]], i64 0, i64 0 -// CHECK1-NEXT: store i8* null, i8** [[TMP160]], align 8 -// CHECK1-NEXT: [[TMP161:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP162:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP163:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l154.region_id, i32 1, i8** [[TMP161]], i8** [[TMP162]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.13, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.14, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK1-NEXT: [[TMP164:%.*]] = icmp ne i32 [[TMP163]], 0 -// CHECK1-NEXT: br i1 [[TMP164]], label [[OMP_OFFLOAD_FAILED36:%.*]], label [[OMP_OFFLOAD_CONT37:%.*]] +// CHECK1-NEXT: store i32 [[TMP144]], i32* [[CONV32]], align 4 +// CHECK1-NEXT: [[TMP145:%.*]] = load i64, i64* [[NN_CASTED]], align 8 +// CHECK1-NEXT: [[TMP146:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP147:%.*]] = bitcast i8** [[TMP146]] to i64* +// CHECK1-NEXT: store i64 [[TMP145]], i64* [[TMP147]], align 8 +// CHECK1-NEXT: [[TMP148:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP149:%.*]] = bitcast i8** [[TMP148]] to i64* +// CHECK1-NEXT: store i64 [[TMP145]], i64* [[TMP149]], align 8 +// CHECK1-NEXT: [[TMP150:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS35]], i64 0, i64 0 +// CHECK1-NEXT: store i8* null, i8** [[TMP150]], align 8 +// CHECK1-NEXT: [[TMP151:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP152:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP153:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l154.region_id, i32 1, i8** [[TMP151]], i8** [[TMP152]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.14, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.15, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK1-NEXT: [[TMP154:%.*]] = icmp ne i32 [[TMP153]], 0 +// CHECK1-NEXT: br i1 [[TMP154]], label [[OMP_OFFLOAD_FAILED36:%.*]], label [[OMP_OFFLOAD_CONT37:%.*]] // CHECK1: 
omp_offload.failed36: -// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l154(i64 [[TMP155]]) #[[ATTR3]] +// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l154(i64 [[TMP145]]) #[[ATTR3]] // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT37]] // CHECK1: omp_offload.cont37: -// CHECK1-NEXT: [[TMP165:%.*]] = load i32, i32* [[NN]], align 4 +// CHECK1-NEXT: [[TMP155:%.*]] = load i32, i32* [[NN]], align 4 // CHECK1-NEXT: [[CONV39:%.*]] = bitcast i64* [[NN_CASTED38]] to i32* -// CHECK1-NEXT: store i32 [[TMP165]], i32* [[CONV39]], align 4 -// CHECK1-NEXT: [[TMP166:%.*]] = load i64, i64* [[NN_CASTED38]], align 8 -// CHECK1-NEXT: [[TMP167:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS40]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP168:%.*]] = bitcast i8** [[TMP167]] to i64* -// CHECK1-NEXT: store i64 [[TMP166]], i64* [[TMP168]], align 8 -// CHECK1-NEXT: [[TMP169:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS41]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP170:%.*]] = bitcast i8** [[TMP169]] to i64* -// CHECK1-NEXT: store i64 [[TMP166]], i64* [[TMP170]], align 8 -// CHECK1-NEXT: [[TMP171:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS42]], i64 0, i64 0 -// CHECK1-NEXT: store i8* null, i8** [[TMP171]], align 8 -// CHECK1-NEXT: [[TMP172:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS40]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP173:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS41]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP174:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l157.region_id, i32 1, i8** [[TMP172]], i8** [[TMP173]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.17, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.18, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK1-NEXT: [[TMP175:%.*]] = icmp ne i32 [[TMP174]], 0 -// CHECK1-NEXT: br i1 [[TMP175]], label [[OMP_OFFLOAD_FAILED43:%.*]], label [[OMP_OFFLOAD_CONT44:%.*]] +// CHECK1-NEXT: store i32 [[TMP155]], i32* [[CONV39]], align 4 +// CHECK1-NEXT: [[TMP156:%.*]] = load i64, i64* [[NN_CASTED38]], align 8 +// CHECK1-NEXT: [[TMP157:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS40]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP158:%.*]] = bitcast i8** [[TMP157]] to i64* +// CHECK1-NEXT: store i64 [[TMP156]], i64* [[TMP158]], align 8 +// CHECK1-NEXT: [[TMP159:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS41]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP160:%.*]] = bitcast i8** [[TMP159]] to i64* +// CHECK1-NEXT: store i64 [[TMP156]], i64* [[TMP160]], align 8 +// CHECK1-NEXT: [[TMP161:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS42]], i64 0, i64 0 +// CHECK1-NEXT: store i8* null, i8** [[TMP161]], align 8 +// CHECK1-NEXT: [[TMP162:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS40]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP163:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS41]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP164:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l157.region_id, i32 1, i8** [[TMP162]], i8** [[TMP163]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.18, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* 
@.offload_maptypes.19, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK1-NEXT: [[TMP165:%.*]] = icmp ne i32 [[TMP164]], 0 +// CHECK1-NEXT: br i1 [[TMP165]], label [[OMP_OFFLOAD_FAILED43:%.*]], label [[OMP_OFFLOAD_CONT44:%.*]] // CHECK1: omp_offload.failed43: -// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l157(i64 [[TMP166]]) #[[ATTR3]] +// CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l157(i64 [[TMP156]]) #[[ATTR3]] // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT44]] // CHECK1: omp_offload.cont44: -// CHECK1-NEXT: [[TMP176:%.*]] = load i32, i32* [[A]], align 4 -// CHECK1-NEXT: [[TMP177:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK1-NEXT: call void @llvm.stackrestore(i8* [[TMP177]]) -// CHECK1-NEXT: ret i32 [[TMP176]] +// CHECK1-NEXT: [[TMP166:%.*]] = load i32, i32* [[A]], align 4 +// CHECK1-NEXT: [[TMP167:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK1-NEXT: call void @llvm.stackrestore(i8* [[TMP167]]) +// CHECK1-NEXT: ret i32 [[TMP166]] // // // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l101 @@ -1049,11 +1031,11 @@ // CHECK1-NEXT: [[CONV1:%.*]] = bitcast i64* [[NN_CASTED]] to i32* // CHECK1-NEXT: store i32 [[TMP0]], i32* [[CONV1]], align 4 // CHECK1-NEXT: [[TMP1:%.*]] = load i64, i64* [[NN_CASTED]], align 8 -// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP1]]) +// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined..12 to void (i32*, i32*, ...)*), i64 [[TMP1]]) // CHECK1-NEXT: ret void // // -// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..12 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[NN:%.*]]) #[[ATTR2]] { // CHECK1-NEXT: entry: // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -1068,11 +1050,11 @@ // CHECK1-NEXT: [[CONV1:%.*]] = bitcast i64* [[NN_CASTED]] to i32* // CHECK1-NEXT: store i32 [[TMP0]], i32* [[CONV1]], align 4 // CHECK1-NEXT: [[TMP1:%.*]] = load i64, i64* [[NN_CASTED]], align 8 -// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined..12 to void (i32*, i32*, ...)*), i64 [[TMP1]]) +// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined..13 to void (i32*, i32*, ...)*), i64 [[TMP1]]) // CHECK1-NEXT: ret void // // -// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..12 +// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..13 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[NN:%.*]]) #[[ATTR2]] { // CHECK1-NEXT: entry: // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -1096,11 +1078,11 @@ // CHECK1-NEXT: [[CONV1:%.*]] = bitcast i64* [[NN_CASTED]] to i32* // CHECK1-NEXT: store i32 [[TMP0]], i32* [[CONV1]], align 4 // CHECK1-NEXT: [[TMP1:%.*]] = load i64, i64* [[NN_CASTED]], align 8 -// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i64 [[TMP1]]) +// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i64 [[TMP1]]) // CHECK1-NEXT: ret void // // -// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..15 +// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..16 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[NN:%.*]]) #[[ATTR2]] { // CHECK1-NEXT: entry: // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -1110,11 +1092,11 @@ // CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 // CHECK1-NEXT: store i64 [[NN]], i64* [[NN_ADDR]], align 8 // CHECK1-NEXT: [[CONV:%.*]] = bitcast i64* [[NN_ADDR]] to i32* -// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i32* [[CONV]]) +// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined..17 to void (i32*, i32*, ...)*), i32* [[CONV]]) // CHECK1-NEXT: ret void // // -// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..16 +// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..17 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[NN:%.*]]) #[[ATTR2]] { // CHECK1-NEXT: entry: // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -1149,7 +1131,7 @@ // CHECK1-NEXT: store i8* null, i8** [[TMP6]], align 8 // CHECK1-NEXT: [[TMP7:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK1-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP9:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z6bazzzziPi_l182.region_id, i32 1, i8** [[TMP7]], i8** [[TMP8]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.20, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.21, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK1-NEXT: [[TMP9:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z6bazzzziPi_l182.region_id, i32 1, i8** [[TMP7]], i8** [[TMP8]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.21, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.22, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK1-NEXT: [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0 // CHECK1-NEXT: br i1 [[TMP10]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK1: omp_offload.failed: @@ -1165,11 +1147,11 @@ // CHECK1-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8 // CHECK1-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8 // CHECK1-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8 -// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined..19 to void (i32*, i32*, ...)*), i64 [[TMP0]]) +// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined..20 to void (i32*, i32*, ...)*), i64 [[TMP0]]) // CHECK1-NEXT: ret void // // -// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..19 +// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..20 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[VLA:%.*]]) #[[ATTR2]] { // CHECK1-NEXT: entry: // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -1227,7 +1209,6 @@ // CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 8 // CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 8 // CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 8 -// CHECK1-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 8 // CHECK1-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 // CHECK1-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8 @@ -1258,56 +1239,46 @@ // CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK1-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to double** // CHECK1-NEXT: store double* [[A]], double** [[TMP13]], align 8 -// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK1-NEXT: store i64 8, i64* [[TMP14]], align 8 -// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK1-NEXT: store i8* null, i8** [[TMP15]], align 8 -// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK1-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i64* -// CHECK1-NEXT: store i64 [[TMP6]], i64* [[TMP17]], align 8 -// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK1-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i64* -// CHECK1-NEXT: store i64 [[TMP6]], i64* [[TMP19]], align 8 -// CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK1-NEXT: store i64 4, i64* [[TMP20]], align 8 -// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK1-NEXT: store i8* null, i8** [[TMP21]], align 8 -// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK1-NEXT: store i8* null, i8** [[TMP14]], align 8 +// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK1-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i64* +// CHECK1-NEXT: store i64 [[TMP6]], i64* [[TMP16]], align 8 +// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK1-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i64* +// CHECK1-NEXT: store i64 [[TMP6]], i64* [[TMP18]], align 8 +// CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK1-NEXT: store i8* null, i8** [[TMP19]], align 8 +// CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// 
CHECK1-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i64* +// CHECK1-NEXT: store i64 2, i64* [[TMP21]], align 8 +// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK1-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i64* // CHECK1-NEXT: store i64 2, i64* [[TMP23]], align 8 -// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK1-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i64* -// CHECK1-NEXT: store i64 2, i64* [[TMP25]], align 8 -// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK1-NEXT: store i64 8, i64* [[TMP26]], align 8 -// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK1-NEXT: store i8* null, i8** [[TMP27]], align 8 -// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK1-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i64* -// CHECK1-NEXT: store i64 [[TMP2]], i64* [[TMP29]], align 8 -// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK1-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i64* -// CHECK1-NEXT: store i64 [[TMP2]], i64* [[TMP31]], align 8 -// CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK1-NEXT: store i64 8, i64* [[TMP32]], align 8 -// CHECK1-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 -// CHECK1-NEXT: store i8* null, i8** [[TMP33]], align 8 -// CHECK1-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK1-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i16** -// CHECK1-NEXT: store i16* [[VLA]], i16** [[TMP35]], align 8 -// CHECK1-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK1-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i16** -// CHECK1-NEXT: store i16* [[VLA]], i16** [[TMP37]], align 8 -// CHECK1-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK1-NEXT: store i64 [[TMP9]], i64* [[TMP38]], align 8 -// CHECK1-NEXT: [[TMP39:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 -// CHECK1-NEXT: store i8* null, i8** [[TMP39]], align 8 -// CHECK1-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l227.region_id, i32 5, i8** [[TMP40]], i8** [[TMP41]], i64* [[TMP42]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.23, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK1-NEXT: [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0 -// CHECK1-NEXT: br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK1-NEXT: store 
i8* null, i8** [[TMP24]], align 8 +// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK1-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i64* +// CHECK1-NEXT: store i64 [[TMP2]], i64* [[TMP26]], align 8 +// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK1-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i64* +// CHECK1-NEXT: store i64 [[TMP2]], i64* [[TMP28]], align 8 +// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 +// CHECK1-NEXT: store i8* null, i8** [[TMP29]], align 8 +// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK1-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i16** +// CHECK1-NEXT: store i16* [[VLA]], i16** [[TMP31]], align 8 +// CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK1-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i16** +// CHECK1-NEXT: store i16* [[VLA]], i16** [[TMP33]], align 8 +// CHECK1-NEXT: store i64 [[TMP9]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.24, i32 0, i32 4), align 8 +// CHECK1-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 +// CHECK1-NEXT: store i8* null, i8** [[TMP34]], align 8 +// CHECK1-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP37:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l227.region_id, i32 5, i8** [[TMP35]], i8** [[TMP36]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.24, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.25, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK1-NEXT: [[TMP38:%.*]] = icmp ne i32 [[TMP37]], 0 +// CHECK1-NEXT: br i1 [[TMP38]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK1: omp_offload.failed: // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l227(%struct.S1* [[THIS1]], i64 [[TMP6]], i64 2, i64 [[TMP2]], i16* [[VLA]]) #[[ATTR3]] // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -1317,15 +1288,15 @@ // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l227(%struct.S1* [[THIS1]], i64 [[TMP6]], i64 2, i64 [[TMP2]], i16* [[VLA]]) #[[ATTR3]] // CHECK1-NEXT: br label [[OMP_IF_END]] // CHECK1: omp_if.end: -// CHECK1-NEXT: [[TMP45:%.*]] = mul nsw i64 1, [[TMP2]] -// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP45]] +// CHECK1-NEXT: [[TMP39:%.*]] = mul nsw i64 1, [[TMP2]] +// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP39]] // CHECK1-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1 -// CHECK1-NEXT: [[TMP46:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2 -// CHECK1-NEXT: [[CONV3:%.*]] = sext i16 [[TMP46]] to i32 -// CHECK1-NEXT: [[TMP47:%.*]] = load i32, i32* [[B]], align 4 -// CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], [[TMP47]] -// CHECK1-NEXT: [[TMP48:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK1-NEXT: call void @llvm.stackrestore(i8* 
[[TMP48]]) +// CHECK1-NEXT: [[TMP40:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2 +// CHECK1-NEXT: [[CONV3:%.*]] = sext i16 [[TMP40]] to i32 +// CHECK1-NEXT: [[TMP41:%.*]] = load i32, i32* [[B]], align 4 +// CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], [[TMP41]] +// CHECK1-NEXT: [[TMP42:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK1-NEXT: call void @llvm.stackrestore(i8* [[TMP42]]) // CHECK1-NEXT: ret i32 [[ADD4]] // // @@ -1397,7 +1368,7 @@ // CHECK1-NEXT: store i8* null, i8** [[TMP26]], align 8 // CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l209.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.25, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.26, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK1-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l209.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.27, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.28, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK1-NEXT: [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0 // CHECK1-NEXT: br i1 [[TMP30]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK1: omp_offload.failed: @@ -1466,7 +1437,7 @@ // CHECK1-NEXT: store i8* null, i8** [[TMP19]], align 8 // CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l192.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.28, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.29, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK1-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l192.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.30, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.31, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK1-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0 // CHECK1-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK1: omp_offload.failed: @@ -1505,11 +1476,11 @@ // CHECK1-NEXT: [[CONV3:%.*]] = bitcast i64* [[B_CASTED]] to i32* // CHECK1-NEXT: store i32 [[TMP4]], i32* [[CONV3]], align 4 // CHECK1-NEXT: [[TMP5:%.*]] = load i64, i64* [[B_CASTED]], align 8 -// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*)* @.omp_outlined..22 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], i16* [[TMP3]]) +// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*)* @.omp_outlined..23 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], i16* [[TMP3]]) // CHECK1-NEXT: ret void // // -// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..22 +// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..23 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i64 noundef [[B:%.*]], i64 noundef [[VLA:%.*]], i64 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR2]] { // CHECK1-NEXT: entry: // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -1578,11 +1549,11 @@ // CHECK1-NEXT: [[CONV5:%.*]] = bitcast i64* [[AAA_CASTED]] to i8* // CHECK1-NEXT: store i8 [[TMP5]], i8* [[CONV5]], align 1 // CHECK1-NEXT: [[TMP6:%.*]] = load i64, i64* [[AAA_CASTED]], align 8 -// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, [10 x i32]*)* @.omp_outlined..24 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], [10 x i32]* [[TMP0]]) +// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, [10 x i32]*)* @.omp_outlined..26 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], [10 x i32]* [[TMP0]]) // CHECK1-NEXT: ret void // // -// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..24 +// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..26 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], i64 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] { // CHECK1-NEXT: entry: // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -1643,11 +1614,11 @@ // CHECK1-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16* // CHECK1-NEXT: store i16 [[TMP3]], i16* [[CONV3]], align 2 // CHECK1-NEXT: [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8 -// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..27 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]]) +// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..29 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]]) // CHECK1-NEXT: ret void // // -// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..27 +// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..29 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] { // CHECK1-NEXT: entry: // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -1720,7 +1691,6 @@ // CHECK2-NEXT: [[DOTOFFLOAD_BASEPTRS25:%.*]] = alloca [9 x i8*], align 8 // CHECK2-NEXT: [[DOTOFFLOAD_PTRS26:%.*]] = alloca [9 x i8*], align 8 // CHECK2-NEXT: [[DOTOFFLOAD_MAPPERS27:%.*]] = alloca [9 x i8*], align 8 -// CHECK2-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [9 x i64], align 8 // CHECK2-NEXT: [[NN:%.*]] = alloca i32, align 4 // CHECK2-NEXT: [[NN_CASTED:%.*]] = alloca i64, align 8 // CHECK2-NEXT: [[DOTOFFLOAD_BASEPTRS33:%.*]] = alloca [1 x i8*], align 8 @@ -1905,96 +1875,79 @@ // CHECK2-NEXT: [[TMP97:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS26]], i32 0, i32 0 // CHECK2-NEXT: [[TMP98:%.*]] = bitcast i8** [[TMP97]] to i64* // CHECK2-NEXT: store i64 [[TMP90]], i64* [[TMP98]], align 8 -// CHECK2-NEXT: [[TMP99:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK2-NEXT: store i64 4, i64* [[TMP99]], align 8 -// CHECK2-NEXT: [[TMP100:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS27]], i64 0, i64 0 -// CHECK2-NEXT: store i8* null, i8** [[TMP100]], align 8 -// CHECK2-NEXT: [[TMP101:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS25]], i32 0, i32 1 -// CHECK2-NEXT: [[TMP102:%.*]] = bitcast i8** [[TMP101]] to [10 x float]** -// CHECK2-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP102]], align 8 -// CHECK2-NEXT: [[TMP103:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS26]], i32 0, i32 1 -// CHECK2-NEXT: [[TMP104:%.*]] = bitcast i8** [[TMP103]] to [10 x float]** -// CHECK2-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP104]], align 8 -// CHECK2-NEXT: [[TMP105:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK2-NEXT: store i64 40, i64* [[TMP105]], align 8 -// CHECK2-NEXT: [[TMP106:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS27]], i64 0, i64 1 -// CHECK2-NEXT: store i8* null, i8** [[TMP106]], align 8 -// CHECK2-NEXT: [[TMP107:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS25]], i32 0, i32 2 +// CHECK2-NEXT: [[TMP99:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS27]], i64 0, i64 0 +// CHECK2-NEXT: store i8* null, i8** [[TMP99]], align 8 +// CHECK2-NEXT: [[TMP100:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS25]], i32 0, i32 1 +// CHECK2-NEXT: [[TMP101:%.*]] = bitcast i8** [[TMP100]] to [10 x float]** +// CHECK2-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP101]], align 8 +// CHECK2-NEXT: [[TMP102:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS26]], i32 0, i32 1 +// CHECK2-NEXT: [[TMP103:%.*]] = bitcast i8** [[TMP102]] to [10 x float]** +// CHECK2-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP103]], align 8 +// CHECK2-NEXT: [[TMP104:%.*]] = getelementptr 
inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS27]], i64 0, i64 1 +// CHECK2-NEXT: store i8* null, i8** [[TMP104]], align 8 +// CHECK2-NEXT: [[TMP105:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS25]], i32 0, i32 2 +// CHECK2-NEXT: [[TMP106:%.*]] = bitcast i8** [[TMP105]] to i64* +// CHECK2-NEXT: store i64 [[TMP2]], i64* [[TMP106]], align 8 +// CHECK2-NEXT: [[TMP107:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS26]], i32 0, i32 2 // CHECK2-NEXT: [[TMP108:%.*]] = bitcast i8** [[TMP107]] to i64* // CHECK2-NEXT: store i64 [[TMP2]], i64* [[TMP108]], align 8 -// CHECK2-NEXT: [[TMP109:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS26]], i32 0, i32 2 -// CHECK2-NEXT: [[TMP110:%.*]] = bitcast i8** [[TMP109]] to i64* -// CHECK2-NEXT: store i64 [[TMP2]], i64* [[TMP110]], align 8 -// CHECK2-NEXT: [[TMP111:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK2-NEXT: store i64 8, i64* [[TMP111]], align 8 -// CHECK2-NEXT: [[TMP112:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS27]], i64 0, i64 2 -// CHECK2-NEXT: store i8* null, i8** [[TMP112]], align 8 -// CHECK2-NEXT: [[TMP113:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS25]], i32 0, i32 3 -// CHECK2-NEXT: [[TMP114:%.*]] = bitcast i8** [[TMP113]] to float** -// CHECK2-NEXT: store float* [[VLA]], float** [[TMP114]], align 8 -// CHECK2-NEXT: [[TMP115:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS26]], i32 0, i32 3 -// CHECK2-NEXT: [[TMP116:%.*]] = bitcast i8** [[TMP115]] to float** -// CHECK2-NEXT: store float* [[VLA]], float** [[TMP116]], align 8 -// CHECK2-NEXT: [[TMP117:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK2-NEXT: store i64 [[TMP92]], i64* [[TMP117]], align 8 -// CHECK2-NEXT: [[TMP118:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS27]], i64 0, i64 3 -// CHECK2-NEXT: store i8* null, i8** [[TMP118]], align 8 -// CHECK2-NEXT: [[TMP119:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS25]], i32 0, i32 4 -// CHECK2-NEXT: [[TMP120:%.*]] = bitcast i8** [[TMP119]] to [5 x [10 x double]]** -// CHECK2-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP120]], align 8 -// CHECK2-NEXT: [[TMP121:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS26]], i32 0, i32 4 -// CHECK2-NEXT: [[TMP122:%.*]] = bitcast i8** [[TMP121]] to [5 x [10 x double]]** -// CHECK2-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP122]], align 8 -// CHECK2-NEXT: [[TMP123:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK2-NEXT: store i64 400, i64* [[TMP123]], align 8 -// CHECK2-NEXT: [[TMP124:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS27]], i64 0, i64 4 +// CHECK2-NEXT: [[TMP109:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS27]], i64 0, i64 2 +// CHECK2-NEXT: store i8* null, i8** [[TMP109]], align 8 +// CHECK2-NEXT: [[TMP110:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS25]], i32 0, i32 3 +// CHECK2-NEXT: [[TMP111:%.*]] = bitcast i8** [[TMP110]] to float** +// CHECK2-NEXT: store float* [[VLA]], float** [[TMP111]], align 8 +// CHECK2-NEXT: [[TMP112:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS26]], i32 0, i32 3 +// CHECK2-NEXT: [[TMP113:%.*]] = bitcast i8** [[TMP112]] to float** 
+// CHECK2-NEXT: store float* [[VLA]], float** [[TMP113]], align 8 +// CHECK2-NEXT: store i64 [[TMP92]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_sizes.10, i32 0, i32 3), align 8 +// CHECK2-NEXT: [[TMP114:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS27]], i64 0, i64 3 +// CHECK2-NEXT: store i8* null, i8** [[TMP114]], align 8 +// CHECK2-NEXT: [[TMP115:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS25]], i32 0, i32 4 +// CHECK2-NEXT: [[TMP116:%.*]] = bitcast i8** [[TMP115]] to [5 x [10 x double]]** +// CHECK2-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP116]], align 8 +// CHECK2-NEXT: [[TMP117:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS26]], i32 0, i32 4 +// CHECK2-NEXT: [[TMP118:%.*]] = bitcast i8** [[TMP117]] to [5 x [10 x double]]** +// CHECK2-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP118]], align 8 +// CHECK2-NEXT: [[TMP119:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS27]], i64 0, i64 4 +// CHECK2-NEXT: store i8* null, i8** [[TMP119]], align 8 +// CHECK2-NEXT: [[TMP120:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS25]], i32 0, i32 5 +// CHECK2-NEXT: [[TMP121:%.*]] = bitcast i8** [[TMP120]] to i64* +// CHECK2-NEXT: store i64 5, i64* [[TMP121]], align 8 +// CHECK2-NEXT: [[TMP122:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS26]], i32 0, i32 5 +// CHECK2-NEXT: [[TMP123:%.*]] = bitcast i8** [[TMP122]] to i64* +// CHECK2-NEXT: store i64 5, i64* [[TMP123]], align 8 +// CHECK2-NEXT: [[TMP124:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS27]], i64 0, i64 5 // CHECK2-NEXT: store i8* null, i8** [[TMP124]], align 8 -// CHECK2-NEXT: [[TMP125:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS25]], i32 0, i32 5 +// CHECK2-NEXT: [[TMP125:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS25]], i32 0, i32 6 // CHECK2-NEXT: [[TMP126:%.*]] = bitcast i8** [[TMP125]] to i64* -// CHECK2-NEXT: store i64 5, i64* [[TMP126]], align 8 -// CHECK2-NEXT: [[TMP127:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS26]], i32 0, i32 5 +// CHECK2-NEXT: store i64 [[TMP5]], i64* [[TMP126]], align 8 +// CHECK2-NEXT: [[TMP127:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS26]], i32 0, i32 6 // CHECK2-NEXT: [[TMP128:%.*]] = bitcast i8** [[TMP127]] to i64* -// CHECK2-NEXT: store i64 5, i64* [[TMP128]], align 8 -// CHECK2-NEXT: [[TMP129:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5 -// CHECK2-NEXT: store i64 8, i64* [[TMP129]], align 8 -// CHECK2-NEXT: [[TMP130:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS27]], i64 0, i64 5 -// CHECK2-NEXT: store i8* null, i8** [[TMP130]], align 8 -// CHECK2-NEXT: [[TMP131:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS25]], i32 0, i32 6 -// CHECK2-NEXT: [[TMP132:%.*]] = bitcast i8** [[TMP131]] to i64* -// CHECK2-NEXT: store i64 [[TMP5]], i64* [[TMP132]], align 8 -// CHECK2-NEXT: [[TMP133:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS26]], i32 0, i32 6 -// CHECK2-NEXT: [[TMP134:%.*]] = bitcast i8** [[TMP133]] to i64* -// CHECK2-NEXT: store i64 [[TMP5]], i64* [[TMP134]], align 8 -// CHECK2-NEXT: [[TMP135:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6 -// CHECK2-NEXT: store i64 8, i64* [[TMP135]], 
align 8 -// CHECK2-NEXT: [[TMP136:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS27]], i64 0, i64 6 -// CHECK2-NEXT: store i8* null, i8** [[TMP136]], align 8 -// CHECK2-NEXT: [[TMP137:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS25]], i32 0, i32 7 -// CHECK2-NEXT: [[TMP138:%.*]] = bitcast i8** [[TMP137]] to double** -// CHECK2-NEXT: store double* [[VLA1]], double** [[TMP138]], align 8 -// CHECK2-NEXT: [[TMP139:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS26]], i32 0, i32 7 -// CHECK2-NEXT: [[TMP140:%.*]] = bitcast i8** [[TMP139]] to double** -// CHECK2-NEXT: store double* [[VLA1]], double** [[TMP140]], align 8 -// CHECK2-NEXT: [[TMP141:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7 -// CHECK2-NEXT: store i64 [[TMP94]], i64* [[TMP141]], align 8 -// CHECK2-NEXT: [[TMP142:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS27]], i64 0, i64 7 -// CHECK2-NEXT: store i8* null, i8** [[TMP142]], align 8 -// CHECK2-NEXT: [[TMP143:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS25]], i32 0, i32 8 -// CHECK2-NEXT: [[TMP144:%.*]] = bitcast i8** [[TMP143]] to %struct.TT** -// CHECK2-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP144]], align 8 -// CHECK2-NEXT: [[TMP145:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS26]], i32 0, i32 8 -// CHECK2-NEXT: [[TMP146:%.*]] = bitcast i8** [[TMP145]] to %struct.TT** -// CHECK2-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP146]], align 8 -// CHECK2-NEXT: [[TMP147:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 8 -// CHECK2-NEXT: store i64 16, i64* [[TMP147]], align 8 -// CHECK2-NEXT: [[TMP148:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS27]], i64 0, i64 8 -// CHECK2-NEXT: store i8* null, i8** [[TMP148]], align 8 -// CHECK2-NEXT: [[TMP149:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS25]], i32 0, i32 0 -// CHECK2-NEXT: [[TMP150:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS26]], i32 0, i32 0 -// CHECK2-NEXT: [[TMP151:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK2-NEXT: [[TMP152:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142.region_id, i32 9, i8** [[TMP149]], i8** [[TMP150]], i64* [[TMP151]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK2-NEXT: [[TMP153:%.*]] = icmp ne i32 [[TMP152]], 0 -// CHECK2-NEXT: br i1 [[TMP153]], label [[OMP_OFFLOAD_FAILED28:%.*]], label [[OMP_OFFLOAD_CONT29:%.*]] +// CHECK2-NEXT: store i64 [[TMP5]], i64* [[TMP128]], align 8 +// CHECK2-NEXT: [[TMP129:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS27]], i64 0, i64 6 +// CHECK2-NEXT: store i8* null, i8** [[TMP129]], align 8 +// CHECK2-NEXT: [[TMP130:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS25]], i32 0, i32 7 +// CHECK2-NEXT: [[TMP131:%.*]] = bitcast i8** [[TMP130]] to double** +// CHECK2-NEXT: store double* [[VLA1]], double** [[TMP131]], align 8 +// CHECK2-NEXT: [[TMP132:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS26]], i32 0, i32 7 +// CHECK2-NEXT: [[TMP133:%.*]] = bitcast i8** [[TMP132]] to double** +// CHECK2-NEXT: store double* [[VLA1]], double** [[TMP133]], align 8 
+// CHECK2-NEXT: store i64 [[TMP94]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_sizes.10, i32 0, i32 7), align 8 +// CHECK2-NEXT: [[TMP134:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS27]], i64 0, i64 7 +// CHECK2-NEXT: store i8* null, i8** [[TMP134]], align 8 +// CHECK2-NEXT: [[TMP135:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS25]], i32 0, i32 8 +// CHECK2-NEXT: [[TMP136:%.*]] = bitcast i8** [[TMP135]] to %struct.TT** +// CHECK2-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP136]], align 8 +// CHECK2-NEXT: [[TMP137:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS26]], i32 0, i32 8 +// CHECK2-NEXT: [[TMP138:%.*]] = bitcast i8** [[TMP137]] to %struct.TT** +// CHECK2-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP138]], align 8 +// CHECK2-NEXT: [[TMP139:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS27]], i64 0, i64 8 +// CHECK2-NEXT: store i8* null, i8** [[TMP139]], align 8 +// CHECK2-NEXT: [[TMP140:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS25]], i32 0, i32 0 +// CHECK2-NEXT: [[TMP141:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS26]], i32 0, i32 0 +// CHECK2-NEXT: [[TMP142:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142.region_id, i32 9, i8** [[TMP140]], i8** [[TMP141]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_sizes.10, i32 0, i32 0), i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_maptypes.11, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK2-NEXT: [[TMP143:%.*]] = icmp ne i32 [[TMP142]], 0 +// CHECK2-NEXT: br i1 [[TMP143]], label [[OMP_OFFLOAD_FAILED28:%.*]], label [[OMP_OFFLOAD_CONT29:%.*]] // CHECK2: omp_offload.failed28: // CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142(i64 [[TMP90]], [10 x float]* [[B]], i64 [[TMP2]], float* [[VLA]], [5 x [10 x double]]* [[C]], i64 5, i64 [[TMP5]], double* [[VLA1]], %struct.TT* [[D]]) #[[ATTR3]] // CHECK2-NEXT: br label [[OMP_OFFLOAD_CONT29]] @@ -2005,52 +1958,52 @@ // CHECK2-NEXT: br label [[OMP_IF_END31]] // CHECK2: omp_if.end31: // CHECK2-NEXT: store i32 0, i32* [[NN]], align 4 -// CHECK2-NEXT: [[TMP154:%.*]] = load i32, i32* [[NN]], align 4 +// CHECK2-NEXT: [[TMP144:%.*]] = load i32, i32* [[NN]], align 4 // CHECK2-NEXT: [[CONV32:%.*]] = bitcast i64* [[NN_CASTED]] to i32* -// CHECK2-NEXT: store i32 [[TMP154]], i32* [[CONV32]], align 4 -// CHECK2-NEXT: [[TMP155:%.*]] = load i64, i64* [[NN_CASTED]], align 8 -// CHECK2-NEXT: [[TMP156:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 0 -// CHECK2-NEXT: [[TMP157:%.*]] = bitcast i8** [[TMP156]] to i64* -// CHECK2-NEXT: store i64 [[TMP155]], i64* [[TMP157]], align 8 -// CHECK2-NEXT: [[TMP158:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 0 -// CHECK2-NEXT: [[TMP159:%.*]] = bitcast i8** [[TMP158]] to i64* -// CHECK2-NEXT: store i64 [[TMP155]], i64* [[TMP159]], align 8 -// CHECK2-NEXT: [[TMP160:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS35]], i64 0, i64 0 -// CHECK2-NEXT: store i8* null, i8** [[TMP160]], align 8 -// CHECK2-NEXT: [[TMP161:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 0 -// CHECK2-NEXT: [[TMP162:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 0 -// 
CHECK2-NEXT: [[TMP163:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l154.region_id, i32 1, i8** [[TMP161]], i8** [[TMP162]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.13, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.14, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK2-NEXT: [[TMP164:%.*]] = icmp ne i32 [[TMP163]], 0 -// CHECK2-NEXT: br i1 [[TMP164]], label [[OMP_OFFLOAD_FAILED36:%.*]], label [[OMP_OFFLOAD_CONT37:%.*]] +// CHECK2-NEXT: store i32 [[TMP144]], i32* [[CONV32]], align 4 +// CHECK2-NEXT: [[TMP145:%.*]] = load i64, i64* [[NN_CASTED]], align 8 +// CHECK2-NEXT: [[TMP146:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 0 +// CHECK2-NEXT: [[TMP147:%.*]] = bitcast i8** [[TMP146]] to i64* +// CHECK2-NEXT: store i64 [[TMP145]], i64* [[TMP147]], align 8 +// CHECK2-NEXT: [[TMP148:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 0 +// CHECK2-NEXT: [[TMP149:%.*]] = bitcast i8** [[TMP148]] to i64* +// CHECK2-NEXT: store i64 [[TMP145]], i64* [[TMP149]], align 8 +// CHECK2-NEXT: [[TMP150:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS35]], i64 0, i64 0 +// CHECK2-NEXT: store i8* null, i8** [[TMP150]], align 8 +// CHECK2-NEXT: [[TMP151:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 0 +// CHECK2-NEXT: [[TMP152:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 0 +// CHECK2-NEXT: [[TMP153:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l154.region_id, i32 1, i8** [[TMP151]], i8** [[TMP152]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.14, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.15, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK2-NEXT: [[TMP154:%.*]] = icmp ne i32 [[TMP153]], 0 +// CHECK2-NEXT: br i1 [[TMP154]], label [[OMP_OFFLOAD_FAILED36:%.*]], label [[OMP_OFFLOAD_CONT37:%.*]] // CHECK2: omp_offload.failed36: -// CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l154(i64 [[TMP155]]) #[[ATTR3]] +// CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l154(i64 [[TMP145]]) #[[ATTR3]] // CHECK2-NEXT: br label [[OMP_OFFLOAD_CONT37]] // CHECK2: omp_offload.cont37: -// CHECK2-NEXT: [[TMP165:%.*]] = load i32, i32* [[NN]], align 4 +// CHECK2-NEXT: [[TMP155:%.*]] = load i32, i32* [[NN]], align 4 // CHECK2-NEXT: [[CONV39:%.*]] = bitcast i64* [[NN_CASTED38]] to i32* -// CHECK2-NEXT: store i32 [[TMP165]], i32* [[CONV39]], align 4 -// CHECK2-NEXT: [[TMP166:%.*]] = load i64, i64* [[NN_CASTED38]], align 8 -// CHECK2-NEXT: [[TMP167:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS40]], i32 0, i32 0 -// CHECK2-NEXT: [[TMP168:%.*]] = bitcast i8** [[TMP167]] to i64* -// CHECK2-NEXT: store i64 [[TMP166]], i64* [[TMP168]], align 8 -// CHECK2-NEXT: [[TMP169:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS41]], i32 0, i32 0 -// CHECK2-NEXT: [[TMP170:%.*]] = bitcast i8** [[TMP169]] to i64* -// CHECK2-NEXT: store i64 [[TMP166]], i64* [[TMP170]], align 8 -// CHECK2-NEXT: [[TMP171:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS42]], i64 0, i64 0 -// CHECK2-NEXT: store i8* null, i8** [[TMP171]], 
align 8 -// CHECK2-NEXT: [[TMP172:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS40]], i32 0, i32 0 -// CHECK2-NEXT: [[TMP173:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS41]], i32 0, i32 0 -// CHECK2-NEXT: [[TMP174:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l157.region_id, i32 1, i8** [[TMP172]], i8** [[TMP173]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.17, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.18, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK2-NEXT: [[TMP175:%.*]] = icmp ne i32 [[TMP174]], 0 -// CHECK2-NEXT: br i1 [[TMP175]], label [[OMP_OFFLOAD_FAILED43:%.*]], label [[OMP_OFFLOAD_CONT44:%.*]] +// CHECK2-NEXT: store i32 [[TMP155]], i32* [[CONV39]], align 4 +// CHECK2-NEXT: [[TMP156:%.*]] = load i64, i64* [[NN_CASTED38]], align 8 +// CHECK2-NEXT: [[TMP157:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS40]], i32 0, i32 0 +// CHECK2-NEXT: [[TMP158:%.*]] = bitcast i8** [[TMP157]] to i64* +// CHECK2-NEXT: store i64 [[TMP156]], i64* [[TMP158]], align 8 +// CHECK2-NEXT: [[TMP159:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS41]], i32 0, i32 0 +// CHECK2-NEXT: [[TMP160:%.*]] = bitcast i8** [[TMP159]] to i64* +// CHECK2-NEXT: store i64 [[TMP156]], i64* [[TMP160]], align 8 +// CHECK2-NEXT: [[TMP161:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS42]], i64 0, i64 0 +// CHECK2-NEXT: store i8* null, i8** [[TMP161]], align 8 +// CHECK2-NEXT: [[TMP162:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS40]], i32 0, i32 0 +// CHECK2-NEXT: [[TMP163:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS41]], i32 0, i32 0 +// CHECK2-NEXT: [[TMP164:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l157.region_id, i32 1, i8** [[TMP162]], i8** [[TMP163]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.18, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.19, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK2-NEXT: [[TMP165:%.*]] = icmp ne i32 [[TMP164]], 0 +// CHECK2-NEXT: br i1 [[TMP165]], label [[OMP_OFFLOAD_FAILED43:%.*]], label [[OMP_OFFLOAD_CONT44:%.*]] // CHECK2: omp_offload.failed43: -// CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l157(i64 [[TMP166]]) #[[ATTR3]] +// CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l157(i64 [[TMP156]]) #[[ATTR3]] // CHECK2-NEXT: br label [[OMP_OFFLOAD_CONT44]] // CHECK2: omp_offload.cont44: -// CHECK2-NEXT: [[TMP176:%.*]] = load i32, i32* [[A]], align 4 -// CHECK2-NEXT: [[TMP177:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK2-NEXT: call void @llvm.stackrestore(i8* [[TMP177]]) -// CHECK2-NEXT: ret i32 [[TMP176]] +// CHECK2-NEXT: [[TMP166:%.*]] = load i32, i32* [[A]], align 4 +// CHECK2-NEXT: [[TMP167:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK2-NEXT: call void @llvm.stackrestore(i8* [[TMP167]]) +// CHECK2-NEXT: ret i32 [[TMP166]] // // // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l101 @@ -2432,11 +2385,11 @@ // CHECK2-NEXT: [[CONV1:%.*]] = bitcast i64* [[NN_CASTED]] to i32* // CHECK2-NEXT: store i32 [[TMP0]], i32* [[CONV1]], align 4 // CHECK2-NEXT: [[TMP1:%.*]] = load 
i64, i64* [[NN_CASTED]], align 8 -// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP1]]) +// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined..12 to void (i32*, i32*, ...)*), i64 [[TMP1]]) // CHECK2-NEXT: ret void // // -// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..12 // CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[NN:%.*]]) #[[ATTR2]] { // CHECK2-NEXT: entry: // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -2451,11 +2404,11 @@ // CHECK2-NEXT: [[CONV1:%.*]] = bitcast i64* [[NN_CASTED]] to i32* // CHECK2-NEXT: store i32 [[TMP0]], i32* [[CONV1]], align 4 // CHECK2-NEXT: [[TMP1:%.*]] = load i64, i64* [[NN_CASTED]], align 8 -// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined..12 to void (i32*, i32*, ...)*), i64 [[TMP1]]) +// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined..13 to void (i32*, i32*, ...)*), i64 [[TMP1]]) // CHECK2-NEXT: ret void // // -// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..12 +// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..13 // CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[NN:%.*]]) #[[ATTR2]] { // CHECK2-NEXT: entry: // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -2479,11 +2432,11 @@ // CHECK2-NEXT: [[CONV1:%.*]] = bitcast i64* [[NN_CASTED]] to i32* // CHECK2-NEXT: store i32 [[TMP0]], i32* [[CONV1]], align 4 // CHECK2-NEXT: [[TMP1:%.*]] = load i64, i64* [[NN_CASTED]], align 8 -// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i64 [[TMP1]]) +// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i64 [[TMP1]]) // CHECK2-NEXT: ret void // // -// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..15 +// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..16 // CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[NN:%.*]]) #[[ATTR2]] { // CHECK2-NEXT: entry: // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -2493,11 +2446,11 @@ // CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 // CHECK2-NEXT: store i64 [[NN]], i64* [[NN_ADDR]], align 8 // CHECK2-NEXT: [[CONV:%.*]] = bitcast i64* [[NN_ADDR]] to i32* -// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i32* [[CONV]]) +// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined..17 to void (i32*, i32*, ...)*), i32* [[CONV]]) // CHECK2-NEXT: ret void // // -// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..16 +// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..17 // CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[NN:%.*]]) #[[ATTR2]] { // CHECK2-NEXT: entry: // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -2532,7 +2485,7 @@ // CHECK2-NEXT: store i8* null, i8** [[TMP6]], align 8 // CHECK2-NEXT: [[TMP7:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK2-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK2-NEXT: [[TMP9:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z6bazzzziPi_l182.region_id, i32 1, i8** [[TMP7]], i8** [[TMP8]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.20, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.21, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK2-NEXT: [[TMP9:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z6bazzzziPi_l182.region_id, i32 1, i8** [[TMP7]], i8** [[TMP8]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.21, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.22, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK2-NEXT: [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0 // CHECK2-NEXT: br i1 [[TMP10]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK2: omp_offload.failed: @@ -2548,11 +2501,11 @@ // CHECK2-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8 // CHECK2-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8 // CHECK2-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8 -// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined..19 to void (i32*, i32*, ...)*), i64 [[TMP0]]) +// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined..20 to void (i32*, i32*, ...)*), i64 [[TMP0]]) // CHECK2-NEXT: ret void // // -// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..19 +// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..20 // CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[VLA:%.*]]) #[[ATTR2]] { // CHECK2-NEXT: entry: // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -2610,7 +2563,6 @@ // CHECK2-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 8 // CHECK2-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 8 // CHECK2-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 8 -// CHECK2-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 8 // CHECK2-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 // CHECK2-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 // CHECK2-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8 @@ -2641,56 +2593,46 @@ // CHECK2-NEXT: [[TMP12:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK2-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to double** // CHECK2-NEXT: store double* [[A]], double** [[TMP13]], align 8 -// CHECK2-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK2-NEXT: store i64 8, i64* [[TMP14]], align 8 -// CHECK2-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK2-NEXT: store i8* null, i8** [[TMP15]], align 8 -// CHECK2-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK2-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i64* -// CHECK2-NEXT: store i64 [[TMP6]], i64* [[TMP17]], align 8 -// CHECK2-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK2-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i64* -// CHECK2-NEXT: store i64 [[TMP6]], i64* [[TMP19]], align 8 -// CHECK2-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK2-NEXT: store i64 4, i64* [[TMP20]], align 8 -// CHECK2-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK2-NEXT: store i8* null, i8** [[TMP21]], align 8 -// CHECK2-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK2-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK2-NEXT: store i8* null, i8** [[TMP14]], align 8 +// CHECK2-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK2-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i64* +// CHECK2-NEXT: store i64 [[TMP6]], i64* [[TMP16]], align 8 +// CHECK2-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK2-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i64* +// CHECK2-NEXT: store i64 [[TMP6]], i64* [[TMP18]], align 8 +// CHECK2-NEXT: [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK2-NEXT: store i8* null, i8** [[TMP19]], align 8 +// CHECK2-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// 
CHECK2-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i64* +// CHECK2-NEXT: store i64 2, i64* [[TMP21]], align 8 +// CHECK2-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK2-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i64* // CHECK2-NEXT: store i64 2, i64* [[TMP23]], align 8 -// CHECK2-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK2-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i64* -// CHECK2-NEXT: store i64 2, i64* [[TMP25]], align 8 -// CHECK2-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK2-NEXT: store i64 8, i64* [[TMP26]], align 8 -// CHECK2-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK2-NEXT: store i8* null, i8** [[TMP27]], align 8 -// CHECK2-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK2-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i64* -// CHECK2-NEXT: store i64 [[TMP2]], i64* [[TMP29]], align 8 -// CHECK2-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK2-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i64* -// CHECK2-NEXT: store i64 [[TMP2]], i64* [[TMP31]], align 8 -// CHECK2-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK2-NEXT: store i64 8, i64* [[TMP32]], align 8 -// CHECK2-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 -// CHECK2-NEXT: store i8* null, i8** [[TMP33]], align 8 -// CHECK2-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK2-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i16** -// CHECK2-NEXT: store i16* [[VLA]], i16** [[TMP35]], align 8 -// CHECK2-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK2-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i16** -// CHECK2-NEXT: store i16* [[VLA]], i16** [[TMP37]], align 8 -// CHECK2-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK2-NEXT: store i64 [[TMP9]], i64* [[TMP38]], align 8 -// CHECK2-NEXT: [[TMP39:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 -// CHECK2-NEXT: store i8* null, i8** [[TMP39]], align 8 -// CHECK2-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK2-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK2-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK2-NEXT: [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l227.region_id, i32 5, i8** [[TMP40]], i8** [[TMP41]], i64* [[TMP42]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.23, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK2-NEXT: [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0 -// CHECK2-NEXT: br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK2-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK2-NEXT: store 
i8* null, i8** [[TMP24]], align 8 +// CHECK2-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK2-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i64* +// CHECK2-NEXT: store i64 [[TMP2]], i64* [[TMP26]], align 8 +// CHECK2-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK2-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i64* +// CHECK2-NEXT: store i64 [[TMP2]], i64* [[TMP28]], align 8 +// CHECK2-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 +// CHECK2-NEXT: store i8* null, i8** [[TMP29]], align 8 +// CHECK2-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK2-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i16** +// CHECK2-NEXT: store i16* [[VLA]], i16** [[TMP31]], align 8 +// CHECK2-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK2-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i16** +// CHECK2-NEXT: store i16* [[VLA]], i16** [[TMP33]], align 8 +// CHECK2-NEXT: store i64 [[TMP9]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.24, i32 0, i32 4), align 8 +// CHECK2-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 +// CHECK2-NEXT: store i8* null, i8** [[TMP34]], align 8 +// CHECK2-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK2-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK2-NEXT: [[TMP37:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l227.region_id, i32 5, i8** [[TMP35]], i8** [[TMP36]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.24, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.25, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK2-NEXT: [[TMP38:%.*]] = icmp ne i32 [[TMP37]], 0 +// CHECK2-NEXT: br i1 [[TMP38]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK2: omp_offload.failed: // CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l227(%struct.S1* [[THIS1]], i64 [[TMP6]], i64 2, i64 [[TMP2]], i16* [[VLA]]) #[[ATTR3]] // CHECK2-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -2700,15 +2642,15 @@ // CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l227(%struct.S1* [[THIS1]], i64 [[TMP6]], i64 2, i64 [[TMP2]], i16* [[VLA]]) #[[ATTR3]] // CHECK2-NEXT: br label [[OMP_IF_END]] // CHECK2: omp_if.end: -// CHECK2-NEXT: [[TMP45:%.*]] = mul nsw i64 1, [[TMP2]] -// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP45]] +// CHECK2-NEXT: [[TMP39:%.*]] = mul nsw i64 1, [[TMP2]] +// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP39]] // CHECK2-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1 -// CHECK2-NEXT: [[TMP46:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2 -// CHECK2-NEXT: [[CONV3:%.*]] = sext i16 [[TMP46]] to i32 -// CHECK2-NEXT: [[TMP47:%.*]] = load i32, i32* [[B]], align 4 -// CHECK2-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], [[TMP47]] -// CHECK2-NEXT: [[TMP48:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK2-NEXT: call void @llvm.stackrestore(i8* 
[[TMP48]]) +// CHECK2-NEXT: [[TMP40:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2 +// CHECK2-NEXT: [[CONV3:%.*]] = sext i16 [[TMP40]] to i32 +// CHECK2-NEXT: [[TMP41:%.*]] = load i32, i32* [[B]], align 4 +// CHECK2-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], [[TMP41]] +// CHECK2-NEXT: [[TMP42:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK2-NEXT: call void @llvm.stackrestore(i8* [[TMP42]]) // CHECK2-NEXT: ret i32 [[ADD4]] // // @@ -2780,7 +2722,7 @@ // CHECK2-NEXT: store i8* null, i8** [[TMP26]], align 8 // CHECK2-NEXT: [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK2-NEXT: [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK2-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l209.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.25, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.26, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK2-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l209.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.27, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.28, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK2-NEXT: [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0 // CHECK2-NEXT: br i1 [[TMP30]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK2: omp_offload.failed: @@ -2849,7 +2791,7 @@ // CHECK2-NEXT: store i8* null, i8** [[TMP19]], align 8 // CHECK2-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK2-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK2-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l192.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.28, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.29, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK2-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l192.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.30, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.31, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK2-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0 // CHECK2-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK2: omp_offload.failed: @@ -2888,11 +2830,11 @@ // CHECK2-NEXT: [[CONV3:%.*]] = bitcast i64* [[B_CASTED]] to i32* // CHECK2-NEXT: store i32 [[TMP4]], i32* [[CONV3]], align 4 // CHECK2-NEXT: [[TMP5:%.*]] = load i64, i64* [[B_CASTED]], align 8 -// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*)* @.omp_outlined..22 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], i16* [[TMP3]]) +// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*)* @.omp_outlined..23 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], i16* [[TMP3]]) // CHECK2-NEXT: ret void // // -// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..22 +// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..23 // CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i64 noundef [[B:%.*]], i64 noundef [[VLA:%.*]], i64 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR2]] { // CHECK2-NEXT: entry: // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -2961,11 +2903,11 @@ // CHECK2-NEXT: [[CONV5:%.*]] = bitcast i64* [[AAA_CASTED]] to i8* // CHECK2-NEXT: store i8 [[TMP5]], i8* [[CONV5]], align 1 // CHECK2-NEXT: [[TMP6:%.*]] = load i64, i64* [[AAA_CASTED]], align 8 -// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, [10 x i32]*)* @.omp_outlined..24 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], [10 x i32]* [[TMP0]]) +// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, [10 x i32]*)* @.omp_outlined..26 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], [10 x i32]* [[TMP0]]) // CHECK2-NEXT: ret void // // -// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..24 +// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..26 // CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], i64 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] { // CHECK2-NEXT: entry: // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -3026,11 +2968,11 @@ // CHECK2-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16* // CHECK2-NEXT: store i16 [[TMP3]], i16* [[CONV3]], align 2 // CHECK2-NEXT: [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8 -// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..27 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]]) +// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..29 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]]) // CHECK2-NEXT: ret void // // -// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..27 +// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..29 // CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] { // CHECK2-NEXT: entry: // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -3103,7 +3045,6 @@ // CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS20:%.*]] = alloca [9 x i8*], align 4 // CHECK3-NEXT: [[DOTOFFLOAD_PTRS21:%.*]] = alloca [9 x i8*], align 4 // CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS22:%.*]] = alloca [9 x i8*], align 4 -// CHECK3-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [9 x i64], align 4 // CHECK3-NEXT: [[NN:%.*]] = alloca i32, align 4 // CHECK3-NEXT: [[NN_CASTED:%.*]] = alloca i32, align 4 // CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS27:%.*]] = alloca [1 x i8*], align 4 @@ -3283,96 +3224,79 @@ // CHECK3-NEXT: [[TMP97:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0 // CHECK3-NEXT: [[TMP98:%.*]] = bitcast i8** [[TMP97]] to i32* // CHECK3-NEXT: store i32 [[TMP88]], i32* [[TMP98]], align 4 -// CHECK3-NEXT: [[TMP99:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK3-NEXT: store i64 4, i64* [[TMP99]], align 4 -// CHECK3-NEXT: [[TMP100:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i32 0, i32 0 -// CHECK3-NEXT: store i8* null, i8** [[TMP100]], align 4 -// CHECK3-NEXT: [[TMP101:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 1 -// CHECK3-NEXT: [[TMP102:%.*]] = bitcast i8** [[TMP101]] to [10 x float]** -// CHECK3-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP102]], align 4 -// CHECK3-NEXT: [[TMP103:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 1 -// CHECK3-NEXT: [[TMP104:%.*]] = bitcast i8** [[TMP103]] to [10 x float]** -// CHECK3-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP104]], align 4 -// CHECK3-NEXT: [[TMP105:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK3-NEXT: store i64 40, i64* [[TMP105]], align 4 -// CHECK3-NEXT: [[TMP106:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i32 0, i32 1 -// CHECK3-NEXT: store i8* null, i8** [[TMP106]], align 4 -// CHECK3-NEXT: [[TMP107:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 2 +// CHECK3-NEXT: [[TMP99:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i32 0, i32 0 +// CHECK3-NEXT: store i8* null, i8** [[TMP99]], align 4 +// CHECK3-NEXT: [[TMP100:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 1 +// CHECK3-NEXT: [[TMP101:%.*]] = bitcast i8** [[TMP100]] to [10 x float]** +// CHECK3-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP101]], align 4 +// CHECK3-NEXT: [[TMP102:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 1 +// CHECK3-NEXT: [[TMP103:%.*]] = bitcast i8** [[TMP102]] to [10 x float]** +// CHECK3-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP103]], align 4 +// CHECK3-NEXT: [[TMP104:%.*]] = getelementptr 
inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i32 0, i32 1 +// CHECK3-NEXT: store i8* null, i8** [[TMP104]], align 4 +// CHECK3-NEXT: [[TMP105:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 2 +// CHECK3-NEXT: [[TMP106:%.*]] = bitcast i8** [[TMP105]] to i32* +// CHECK3-NEXT: store i32 [[TMP1]], i32* [[TMP106]], align 4 +// CHECK3-NEXT: [[TMP107:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 2 // CHECK3-NEXT: [[TMP108:%.*]] = bitcast i8** [[TMP107]] to i32* // CHECK3-NEXT: store i32 [[TMP1]], i32* [[TMP108]], align 4 -// CHECK3-NEXT: [[TMP109:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 2 -// CHECK3-NEXT: [[TMP110:%.*]] = bitcast i8** [[TMP109]] to i32* -// CHECK3-NEXT: store i32 [[TMP1]], i32* [[TMP110]], align 4 -// CHECK3-NEXT: [[TMP111:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK3-NEXT: store i64 4, i64* [[TMP111]], align 4 -// CHECK3-NEXT: [[TMP112:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i32 0, i32 2 -// CHECK3-NEXT: store i8* null, i8** [[TMP112]], align 4 -// CHECK3-NEXT: [[TMP113:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 3 -// CHECK3-NEXT: [[TMP114:%.*]] = bitcast i8** [[TMP113]] to float** -// CHECK3-NEXT: store float* [[VLA]], float** [[TMP114]], align 4 -// CHECK3-NEXT: [[TMP115:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 3 -// CHECK3-NEXT: [[TMP116:%.*]] = bitcast i8** [[TMP115]] to float** -// CHECK3-NEXT: store float* [[VLA]], float** [[TMP116]], align 4 -// CHECK3-NEXT: [[TMP117:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK3-NEXT: store i64 [[TMP91]], i64* [[TMP117]], align 4 -// CHECK3-NEXT: [[TMP118:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i32 0, i32 3 -// CHECK3-NEXT: store i8* null, i8** [[TMP118]], align 4 -// CHECK3-NEXT: [[TMP119:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 4 -// CHECK3-NEXT: [[TMP120:%.*]] = bitcast i8** [[TMP119]] to [5 x [10 x double]]** -// CHECK3-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP120]], align 4 -// CHECK3-NEXT: [[TMP121:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 4 -// CHECK3-NEXT: [[TMP122:%.*]] = bitcast i8** [[TMP121]] to [5 x [10 x double]]** -// CHECK3-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP122]], align 4 -// CHECK3-NEXT: [[TMP123:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK3-NEXT: store i64 400, i64* [[TMP123]], align 4 -// CHECK3-NEXT: [[TMP124:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i32 0, i32 4 +// CHECK3-NEXT: [[TMP109:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i32 0, i32 2 +// CHECK3-NEXT: store i8* null, i8** [[TMP109]], align 4 +// CHECK3-NEXT: [[TMP110:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 3 +// CHECK3-NEXT: [[TMP111:%.*]] = bitcast i8** [[TMP110]] to float** +// CHECK3-NEXT: store float* [[VLA]], float** [[TMP111]], align 4 +// CHECK3-NEXT: [[TMP112:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 3 +// CHECK3-NEXT: [[TMP113:%.*]] = bitcast i8** [[TMP112]] to float** 
+// CHECK3-NEXT: store float* [[VLA]], float** [[TMP113]], align 4 +// CHECK3-NEXT: store i64 [[TMP91]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_sizes.10, i32 0, i32 3), align 4 +// CHECK3-NEXT: [[TMP114:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i32 0, i32 3 +// CHECK3-NEXT: store i8* null, i8** [[TMP114]], align 4 +// CHECK3-NEXT: [[TMP115:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 4 +// CHECK3-NEXT: [[TMP116:%.*]] = bitcast i8** [[TMP115]] to [5 x [10 x double]]** +// CHECK3-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP116]], align 4 +// CHECK3-NEXT: [[TMP117:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 4 +// CHECK3-NEXT: [[TMP118:%.*]] = bitcast i8** [[TMP117]] to [5 x [10 x double]]** +// CHECK3-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP118]], align 4 +// CHECK3-NEXT: [[TMP119:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i32 0, i32 4 +// CHECK3-NEXT: store i8* null, i8** [[TMP119]], align 4 +// CHECK3-NEXT: [[TMP120:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 5 +// CHECK3-NEXT: [[TMP121:%.*]] = bitcast i8** [[TMP120]] to i32* +// CHECK3-NEXT: store i32 5, i32* [[TMP121]], align 4 +// CHECK3-NEXT: [[TMP122:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 5 +// CHECK3-NEXT: [[TMP123:%.*]] = bitcast i8** [[TMP122]] to i32* +// CHECK3-NEXT: store i32 5, i32* [[TMP123]], align 4 +// CHECK3-NEXT: [[TMP124:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i32 0, i32 5 // CHECK3-NEXT: store i8* null, i8** [[TMP124]], align 4 -// CHECK3-NEXT: [[TMP125:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 5 +// CHECK3-NEXT: [[TMP125:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 6 // CHECK3-NEXT: [[TMP126:%.*]] = bitcast i8** [[TMP125]] to i32* -// CHECK3-NEXT: store i32 5, i32* [[TMP126]], align 4 -// CHECK3-NEXT: [[TMP127:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 5 +// CHECK3-NEXT: store i32 [[TMP3]], i32* [[TMP126]], align 4 +// CHECK3-NEXT: [[TMP127:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 6 // CHECK3-NEXT: [[TMP128:%.*]] = bitcast i8** [[TMP127]] to i32* -// CHECK3-NEXT: store i32 5, i32* [[TMP128]], align 4 -// CHECK3-NEXT: [[TMP129:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5 -// CHECK3-NEXT: store i64 4, i64* [[TMP129]], align 4 -// CHECK3-NEXT: [[TMP130:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i32 0, i32 5 -// CHECK3-NEXT: store i8* null, i8** [[TMP130]], align 4 -// CHECK3-NEXT: [[TMP131:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 6 -// CHECK3-NEXT: [[TMP132:%.*]] = bitcast i8** [[TMP131]] to i32* -// CHECK3-NEXT: store i32 [[TMP3]], i32* [[TMP132]], align 4 -// CHECK3-NEXT: [[TMP133:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 6 -// CHECK3-NEXT: [[TMP134:%.*]] = bitcast i8** [[TMP133]] to i32* -// CHECK3-NEXT: store i32 [[TMP3]], i32* [[TMP134]], align 4 -// CHECK3-NEXT: [[TMP135:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6 -// CHECK3-NEXT: store i64 4, i64* [[TMP135]], 
align 4 -// CHECK3-NEXT: [[TMP136:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i32 0, i32 6 -// CHECK3-NEXT: store i8* null, i8** [[TMP136]], align 4 -// CHECK3-NEXT: [[TMP137:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 7 -// CHECK3-NEXT: [[TMP138:%.*]] = bitcast i8** [[TMP137]] to double** -// CHECK3-NEXT: store double* [[VLA1]], double** [[TMP138]], align 4 -// CHECK3-NEXT: [[TMP139:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 7 -// CHECK3-NEXT: [[TMP140:%.*]] = bitcast i8** [[TMP139]] to double** -// CHECK3-NEXT: store double* [[VLA1]], double** [[TMP140]], align 4 -// CHECK3-NEXT: [[TMP141:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7 -// CHECK3-NEXT: store i64 [[TMP94]], i64* [[TMP141]], align 4 -// CHECK3-NEXT: [[TMP142:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i32 0, i32 7 -// CHECK3-NEXT: store i8* null, i8** [[TMP142]], align 4 -// CHECK3-NEXT: [[TMP143:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 8 -// CHECK3-NEXT: [[TMP144:%.*]] = bitcast i8** [[TMP143]] to %struct.TT** -// CHECK3-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP144]], align 4 -// CHECK3-NEXT: [[TMP145:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 8 -// CHECK3-NEXT: [[TMP146:%.*]] = bitcast i8** [[TMP145]] to %struct.TT** -// CHECK3-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP146]], align 4 -// CHECK3-NEXT: [[TMP147:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 8 -// CHECK3-NEXT: store i64 12, i64* [[TMP147]], align 4 -// CHECK3-NEXT: [[TMP148:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i32 0, i32 8 -// CHECK3-NEXT: store i8* null, i8** [[TMP148]], align 4 -// CHECK3-NEXT: [[TMP149:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP150:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP151:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP152:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142.region_id, i32 9, i8** [[TMP149]], i8** [[TMP150]], i64* [[TMP151]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK3-NEXT: [[TMP153:%.*]] = icmp ne i32 [[TMP152]], 0 -// CHECK3-NEXT: br i1 [[TMP153]], label [[OMP_OFFLOAD_FAILED23:%.*]], label [[OMP_OFFLOAD_CONT24:%.*]] +// CHECK3-NEXT: store i32 [[TMP3]], i32* [[TMP128]], align 4 +// CHECK3-NEXT: [[TMP129:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i32 0, i32 6 +// CHECK3-NEXT: store i8* null, i8** [[TMP129]], align 4 +// CHECK3-NEXT: [[TMP130:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 7 +// CHECK3-NEXT: [[TMP131:%.*]] = bitcast i8** [[TMP130]] to double** +// CHECK3-NEXT: store double* [[VLA1]], double** [[TMP131]], align 4 +// CHECK3-NEXT: [[TMP132:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 7 +// CHECK3-NEXT: [[TMP133:%.*]] = bitcast i8** [[TMP132]] to double** +// CHECK3-NEXT: store double* [[VLA1]], double** [[TMP133]], align 4 
+// CHECK3-NEXT: store i64 [[TMP94]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_sizes.10, i32 0, i32 7), align 4 +// CHECK3-NEXT: [[TMP134:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i32 0, i32 7 +// CHECK3-NEXT: store i8* null, i8** [[TMP134]], align 4 +// CHECK3-NEXT: [[TMP135:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 8 +// CHECK3-NEXT: [[TMP136:%.*]] = bitcast i8** [[TMP135]] to %struct.TT** +// CHECK3-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP136]], align 4 +// CHECK3-NEXT: [[TMP137:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 8 +// CHECK3-NEXT: [[TMP138:%.*]] = bitcast i8** [[TMP137]] to %struct.TT** +// CHECK3-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP138]], align 4 +// CHECK3-NEXT: [[TMP139:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i32 0, i32 8 +// CHECK3-NEXT: store i8* null, i8** [[TMP139]], align 4 +// CHECK3-NEXT: [[TMP140:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP141:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP142:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142.region_id, i32 9, i8** [[TMP140]], i8** [[TMP141]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_sizes.10, i32 0, i32 0), i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_maptypes.11, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK3-NEXT: [[TMP143:%.*]] = icmp ne i32 [[TMP142]], 0 +// CHECK3-NEXT: br i1 [[TMP143]], label [[OMP_OFFLOAD_FAILED23:%.*]], label [[OMP_OFFLOAD_CONT24:%.*]] // CHECK3: omp_offload.failed23: // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142(i32 [[TMP88]], [10 x float]* [[B]], i32 [[TMP1]], float* [[VLA]], [5 x [10 x double]]* [[C]], i32 5, i32 [[TMP3]], double* [[VLA1]], %struct.TT* [[D]]) #[[ATTR3]] // CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT24]] @@ -3383,50 +3307,50 @@ // CHECK3-NEXT: br label [[OMP_IF_END26]] // CHECK3: omp_if.end26: // CHECK3-NEXT: store i32 0, i32* [[NN]], align 4 -// CHECK3-NEXT: [[TMP154:%.*]] = load i32, i32* [[NN]], align 4 -// CHECK3-NEXT: store i32 [[TMP154]], i32* [[NN_CASTED]], align 4 -// CHECK3-NEXT: [[TMP155:%.*]] = load i32, i32* [[NN_CASTED]], align 4 -// CHECK3-NEXT: [[TMP156:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS27]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP157:%.*]] = bitcast i8** [[TMP156]] to i32* -// CHECK3-NEXT: store i32 [[TMP155]], i32* [[TMP157]], align 4 -// CHECK3-NEXT: [[TMP158:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS28]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP159:%.*]] = bitcast i8** [[TMP158]] to i32* -// CHECK3-NEXT: store i32 [[TMP155]], i32* [[TMP159]], align 4 -// CHECK3-NEXT: [[TMP160:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS29]], i32 0, i32 0 -// CHECK3-NEXT: store i8* null, i8** [[TMP160]], align 4 -// CHECK3-NEXT: [[TMP161:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS27]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP162:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS28]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP163:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* 
@.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l154.region_id, i32 1, i8** [[TMP161]], i8** [[TMP162]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.13, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.14, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK3-NEXT: [[TMP164:%.*]] = icmp ne i32 [[TMP163]], 0 -// CHECK3-NEXT: br i1 [[TMP164]], label [[OMP_OFFLOAD_FAILED30:%.*]], label [[OMP_OFFLOAD_CONT31:%.*]] +// CHECK3-NEXT: [[TMP144:%.*]] = load i32, i32* [[NN]], align 4 +// CHECK3-NEXT: store i32 [[TMP144]], i32* [[NN_CASTED]], align 4 +// CHECK3-NEXT: [[TMP145:%.*]] = load i32, i32* [[NN_CASTED]], align 4 +// CHECK3-NEXT: [[TMP146:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS27]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP147:%.*]] = bitcast i8** [[TMP146]] to i32* +// CHECK3-NEXT: store i32 [[TMP145]], i32* [[TMP147]], align 4 +// CHECK3-NEXT: [[TMP148:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS28]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP149:%.*]] = bitcast i8** [[TMP148]] to i32* +// CHECK3-NEXT: store i32 [[TMP145]], i32* [[TMP149]], align 4 +// CHECK3-NEXT: [[TMP150:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS29]], i32 0, i32 0 +// CHECK3-NEXT: store i8* null, i8** [[TMP150]], align 4 +// CHECK3-NEXT: [[TMP151:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS27]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP152:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS28]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP153:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l154.region_id, i32 1, i8** [[TMP151]], i8** [[TMP152]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.14, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.15, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK3-NEXT: [[TMP154:%.*]] = icmp ne i32 [[TMP153]], 0 +// CHECK3-NEXT: br i1 [[TMP154]], label [[OMP_OFFLOAD_FAILED30:%.*]], label [[OMP_OFFLOAD_CONT31:%.*]] // CHECK3: omp_offload.failed30: -// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l154(i32 [[TMP155]]) #[[ATTR3]] +// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l154(i32 [[TMP145]]) #[[ATTR3]] // CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT31]] // CHECK3: omp_offload.cont31: -// CHECK3-NEXT: [[TMP165:%.*]] = load i32, i32* [[NN]], align 4 -// CHECK3-NEXT: store i32 [[TMP165]], i32* [[NN_CASTED32]], align 4 -// CHECK3-NEXT: [[TMP166:%.*]] = load i32, i32* [[NN_CASTED32]], align 4 -// CHECK3-NEXT: [[TMP167:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP168:%.*]] = bitcast i8** [[TMP167]] to i32* -// CHECK3-NEXT: store i32 [[TMP166]], i32* [[TMP168]], align 4 -// CHECK3-NEXT: [[TMP169:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP170:%.*]] = bitcast i8** [[TMP169]] to i32* -// CHECK3-NEXT: store i32 [[TMP166]], i32* [[TMP170]], align 4 -// CHECK3-NEXT: [[TMP171:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS35]], i32 0, i32 0 -// CHECK3-NEXT: store i8* null, i8** [[TMP171]], align 4 -// CHECK3-NEXT: [[TMP172:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP173:%.*]] = 
getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP174:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l157.region_id, i32 1, i8** [[TMP172]], i8** [[TMP173]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.17, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.18, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK3-NEXT: [[TMP175:%.*]] = icmp ne i32 [[TMP174]], 0 -// CHECK3-NEXT: br i1 [[TMP175]], label [[OMP_OFFLOAD_FAILED36:%.*]], label [[OMP_OFFLOAD_CONT37:%.*]] +// CHECK3-NEXT: [[TMP155:%.*]] = load i32, i32* [[NN]], align 4 +// CHECK3-NEXT: store i32 [[TMP155]], i32* [[NN_CASTED32]], align 4 +// CHECK3-NEXT: [[TMP156:%.*]] = load i32, i32* [[NN_CASTED32]], align 4 +// CHECK3-NEXT: [[TMP157:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP158:%.*]] = bitcast i8** [[TMP157]] to i32* +// CHECK3-NEXT: store i32 [[TMP156]], i32* [[TMP158]], align 4 +// CHECK3-NEXT: [[TMP159:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP160:%.*]] = bitcast i8** [[TMP159]] to i32* +// CHECK3-NEXT: store i32 [[TMP156]], i32* [[TMP160]], align 4 +// CHECK3-NEXT: [[TMP161:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS35]], i32 0, i32 0 +// CHECK3-NEXT: store i8* null, i8** [[TMP161]], align 4 +// CHECK3-NEXT: [[TMP162:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP163:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP164:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l157.region_id, i32 1, i8** [[TMP162]], i8** [[TMP163]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.18, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.19, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK3-NEXT: [[TMP165:%.*]] = icmp ne i32 [[TMP164]], 0 +// CHECK3-NEXT: br i1 [[TMP165]], label [[OMP_OFFLOAD_FAILED36:%.*]], label [[OMP_OFFLOAD_CONT37:%.*]] // CHECK3: omp_offload.failed36: -// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l157(i32 [[TMP166]]) #[[ATTR3]] +// CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l157(i32 [[TMP156]]) #[[ATTR3]] // CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT37]] // CHECK3: omp_offload.cont37: -// CHECK3-NEXT: [[TMP176:%.*]] = load i32, i32* [[A]], align 4 -// CHECK3-NEXT: [[TMP177:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK3-NEXT: call void @llvm.stackrestore(i8* [[TMP177]]) -// CHECK3-NEXT: ret i32 [[TMP176]] +// CHECK3-NEXT: [[TMP166:%.*]] = load i32, i32* [[A]], align 4 +// CHECK3-NEXT: [[TMP167:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK3-NEXT: call void @llvm.stackrestore(i8* [[TMP167]]) +// CHECK3-NEXT: ret i32 [[TMP166]] // // // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l101 @@ -3793,11 +3717,11 @@ // CHECK3-NEXT: [[TMP0:%.*]] = load i32, i32* [[NN_ADDR]], align 4 // CHECK3-NEXT: store i32 [[TMP0]], i32* [[NN_CASTED]], align 4 // CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[NN_CASTED]], align 4 -// CHECK3-NEXT: call void (%struct.ident_t*, i32, 
void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP1]]) +// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32)* @.omp_outlined..12 to void (i32*, i32*, ...)*), i32 [[TMP1]]) // CHECK3-NEXT: ret void // // -// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..12 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[NN:%.*]]) #[[ATTR2]] { // CHECK3-NEXT: entry: // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -3810,11 +3734,11 @@ // CHECK3-NEXT: [[TMP0:%.*]] = load i32, i32* [[NN_ADDR]], align 4 // CHECK3-NEXT: store i32 [[TMP0]], i32* [[NN_CASTED]], align 4 // CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[NN_CASTED]], align 4 -// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32)* @.omp_outlined..12 to void (i32*, i32*, ...)*), i32 [[TMP1]]) +// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32)* @.omp_outlined..13 to void (i32*, i32*, ...)*), i32 [[TMP1]]) // CHECK3-NEXT: ret void // // -// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..12 +// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..13 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[NN:%.*]]) #[[ATTR2]] { // CHECK3-NEXT: entry: // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -3835,11 +3759,11 @@ // CHECK3-NEXT: [[TMP0:%.*]] = load i32, i32* [[NN_ADDR]], align 4 // CHECK3-NEXT: store i32 [[TMP0]], i32* [[NN_CASTED]], align 4 // CHECK3-NEXT: [[TMP1:%.*]] = load i32, i32* [[NN_CASTED]], align 4 -// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i32 [[TMP1]]) +// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i32 [[TMP1]]) // CHECK3-NEXT: ret void // // -// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..15 +// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..16 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[NN:%.*]]) #[[ATTR2]] { // CHECK3-NEXT: entry: // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -3848,11 +3772,11 @@ // CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 // CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 // CHECK3-NEXT: store i32 [[NN]], i32* [[NN_ADDR]], align 4 -// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i32* [[NN_ADDR]]) +// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined..17 to void (i32*, i32*, ...)*), i32* [[NN_ADDR]]) // CHECK3-NEXT: ret void // // -// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..16 +// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..17 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[NN:%.*]]) #[[ATTR2]] { // CHECK3-NEXT: entry: // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -3886,7 +3810,7 @@ // CHECK3-NEXT: store i8* null, i8** [[TMP5]], align 4 // CHECK3-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK3-NEXT: [[TMP7:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP8:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z6bazzzziPi_l182.region_id, i32 1, i8** [[TMP6]], i8** [[TMP7]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.20, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.21, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK3-NEXT: [[TMP8:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z6bazzzziPi_l182.region_id, i32 1, i8** [[TMP6]], i8** [[TMP7]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.21, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.22, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK3-NEXT: [[TMP9:%.*]] = icmp ne i32 [[TMP8]], 0 // CHECK3-NEXT: br i1 [[TMP9]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK3: omp_offload.failed: @@ -3902,11 +3826,11 @@ // CHECK3-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4 // CHECK3-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4 // CHECK3-NEXT: [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4 -// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32)* @.omp_outlined..19 to void (i32*, i32*, ...)*), i32 [[TMP0]]) +// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32)* @.omp_outlined..20 to void (i32*, i32*, ...)*), i32 [[TMP0]]) // CHECK3-NEXT: ret void // // -// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..19 +// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..20 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[VLA:%.*]]) #[[ATTR2]] { // CHECK3-NEXT: entry: // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -3964,7 +3888,6 @@ // CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 4 // CHECK3-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 4 // CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 4 -// CHECK3-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 4 // CHECK3-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 // CHECK3-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 // CHECK3-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 @@ -3994,56 +3917,46 @@ // CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK3-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to double** // CHECK3-NEXT: store double* [[A]], double** [[TMP13]], align 4 -// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK3-NEXT: store i64 8, i64* [[TMP14]], align 4 -// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK3-NEXT: store i8* null, i8** [[TMP15]], align 4 -// CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK3-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32* -// CHECK3-NEXT: store i32 [[TMP5]], i32* [[TMP17]], align 4 -// CHECK3-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK3-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32* -// CHECK3-NEXT: store i32 [[TMP5]], i32* [[TMP19]], align 4 -// CHECK3-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK3-NEXT: store i64 4, i64* [[TMP20]], align 4 -// CHECK3-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK3-NEXT: store i8* null, i8** [[TMP21]], align 4 -// CHECK3-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK3-NEXT: store i8* null, i8** [[TMP14]], align 4 +// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK3-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i32* +// CHECK3-NEXT: store i32 [[TMP5]], i32* [[TMP16]], align 4 +// CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK3-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32* +// CHECK3-NEXT: store i32 [[TMP5]], i32* [[TMP18]], align 4 +// CHECK3-NEXT: [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK3-NEXT: store i8* null, i8** [[TMP19]], align 4 +// CHECK3-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// 
CHECK3-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32* +// CHECK3-NEXT: store i32 2, i32* [[TMP21]], align 4 +// CHECK3-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK3-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i32* // CHECK3-NEXT: store i32 2, i32* [[TMP23]], align 4 -// CHECK3-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK3-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i32* -// CHECK3-NEXT: store i32 2, i32* [[TMP25]], align 4 -// CHECK3-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK3-NEXT: store i64 4, i64* [[TMP26]], align 4 -// CHECK3-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK3-NEXT: store i8* null, i8** [[TMP27]], align 4 -// CHECK3-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK3-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i32* -// CHECK3-NEXT: store i32 [[TMP1]], i32* [[TMP29]], align 4 -// CHECK3-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK3-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i32* -// CHECK3-NEXT: store i32 [[TMP1]], i32* [[TMP31]], align 4 -// CHECK3-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK3-NEXT: store i64 4, i64* [[TMP32]], align 4 -// CHECK3-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 -// CHECK3-NEXT: store i8* null, i8** [[TMP33]], align 4 -// CHECK3-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK3-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i16** -// CHECK3-NEXT: store i16* [[VLA]], i16** [[TMP35]], align 4 -// CHECK3-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK3-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i16** -// CHECK3-NEXT: store i16* [[VLA]], i16** [[TMP37]], align 4 -// CHECK3-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK3-NEXT: store i64 [[TMP9]], i64* [[TMP38]], align 4 -// CHECK3-NEXT: [[TMP39:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 -// CHECK3-NEXT: store i8* null, i8** [[TMP39]], align 4 -// CHECK3-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l227.region_id, i32 5, i8** [[TMP40]], i8** [[TMP41]], i64* [[TMP42]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.23, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK3-NEXT: [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0 -// CHECK3-NEXT: br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK3-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK3-NEXT: store 
i8* null, i8** [[TMP24]], align 4 +// CHECK3-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK3-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i32* +// CHECK3-NEXT: store i32 [[TMP1]], i32* [[TMP26]], align 4 +// CHECK3-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK3-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i32* +// CHECK3-NEXT: store i32 [[TMP1]], i32* [[TMP28]], align 4 +// CHECK3-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 +// CHECK3-NEXT: store i8* null, i8** [[TMP29]], align 4 +// CHECK3-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK3-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i16** +// CHECK3-NEXT: store i16* [[VLA]], i16** [[TMP31]], align 4 +// CHECK3-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK3-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i16** +// CHECK3-NEXT: store i16* [[VLA]], i16** [[TMP33]], align 4 +// CHECK3-NEXT: store i64 [[TMP9]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.24, i32 0, i32 4), align 4 +// CHECK3-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 +// CHECK3-NEXT: store i8* null, i8** [[TMP34]], align 4 +// CHECK3-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP37:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l227.region_id, i32 5, i8** [[TMP35]], i8** [[TMP36]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.24, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.25, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK3-NEXT: [[TMP38:%.*]] = icmp ne i32 [[TMP37]], 0 +// CHECK3-NEXT: br i1 [[TMP38]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK3: omp_offload.failed: // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l227(%struct.S1* [[THIS1]], i32 [[TMP5]], i32 2, i32 [[TMP1]], i16* [[VLA]]) #[[ATTR3]] // CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -4053,15 +3966,15 @@ // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l227(%struct.S1* [[THIS1]], i32 [[TMP5]], i32 2, i32 [[TMP1]], i16* [[VLA]]) #[[ATTR3]] // CHECK3-NEXT: br label [[OMP_IF_END]] // CHECK3: omp_if.end: -// CHECK3-NEXT: [[TMP45:%.*]] = mul nsw i32 1, [[TMP1]] -// CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP45]] +// CHECK3-NEXT: [[TMP39:%.*]] = mul nsw i32 1, [[TMP1]] +// CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP39]] // CHECK3-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1 -// CHECK3-NEXT: [[TMP46:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2 -// CHECK3-NEXT: [[CONV:%.*]] = sext i16 [[TMP46]] to i32 -// CHECK3-NEXT: [[TMP47:%.*]] = load i32, i32* [[B]], align 4 -// CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[TMP47]] -// CHECK3-NEXT: [[TMP48:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK3-NEXT: call void @llvm.stackrestore(i8* 
[[TMP48]]) +// CHECK3-NEXT: [[TMP40:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2 +// CHECK3-NEXT: [[CONV:%.*]] = sext i16 [[TMP40]] to i32 +// CHECK3-NEXT: [[TMP41:%.*]] = load i32, i32* [[B]], align 4 +// CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[TMP41]] +// CHECK3-NEXT: [[TMP42:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK3-NEXT: call void @llvm.stackrestore(i8* [[TMP42]]) // CHECK3-NEXT: ret i32 [[ADD3]] // // @@ -4132,7 +4045,7 @@ // CHECK3-NEXT: store i8* null, i8** [[TMP26]], align 4 // CHECK3-NEXT: [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK3-NEXT: [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l209.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.25, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.26, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK3-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l209.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.27, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.28, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK3-NEXT: [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0 // CHECK3-NEXT: br i1 [[TMP30]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK3: omp_offload.failed: @@ -4200,7 +4113,7 @@ // CHECK3-NEXT: store i8* null, i8** [[TMP19]], align 4 // CHECK3-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK3-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l192.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.28, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.29, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK3-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l192.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.30, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.31, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK3-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0 // CHECK3-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK3: omp_offload.failed: @@ -4237,11 +4150,11 @@ // CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[B_ADDR]], align 4 // CHECK3-NEXT: store i32 [[TMP4]], i32* [[B_CASTED]], align 4 // CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[B_CASTED]], align 4 -// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*)* @.omp_outlined..22 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], i16* [[TMP3]]) +// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*)* @.omp_outlined..23 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], i16* [[TMP3]]) // CHECK3-NEXT: ret void // // -// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..22 +// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..23 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i32 noundef [[B:%.*]], i32 noundef [[VLA:%.*]], i32 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR2]] { // CHECK3-NEXT: entry: // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -4307,11 +4220,11 @@ // CHECK3-NEXT: [[CONV3:%.*]] = bitcast i32* [[AAA_CASTED]] to i8* // CHECK3-NEXT: store i8 [[TMP5]], i8* [[CONV3]], align 1 // CHECK3-NEXT: [[TMP6:%.*]] = load i32, i32* [[AAA_CASTED]], align 4 -// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, [10 x i32]*)* @.omp_outlined..24 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], [10 x i32]* [[TMP0]]) +// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, [10 x i32]*)* @.omp_outlined..26 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], [10 x i32]* [[TMP0]]) // CHECK3-NEXT: ret void // // -// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..24 +// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..26 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], i32 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] { // CHECK3-NEXT: entry: // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -4369,11 +4282,11 @@ // CHECK3-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16* // CHECK3-NEXT: store i16 [[TMP3]], i16* [[CONV1]], align 2 // CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4 -// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..27 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]]) +// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..29 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]]) // CHECK3-NEXT: ret void // // -// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..27 +// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..29 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] { // CHECK3-NEXT: entry: // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -4445,7 +4358,6 @@ // CHECK4-NEXT: [[DOTOFFLOAD_BASEPTRS20:%.*]] = alloca [9 x i8*], align 4 // CHECK4-NEXT: [[DOTOFFLOAD_PTRS21:%.*]] = alloca [9 x i8*], align 4 // CHECK4-NEXT: [[DOTOFFLOAD_MAPPERS22:%.*]] = alloca [9 x i8*], align 4 -// CHECK4-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [9 x i64], align 4 // CHECK4-NEXT: [[NN:%.*]] = alloca i32, align 4 // CHECK4-NEXT: [[NN_CASTED:%.*]] = alloca i32, align 4 // CHECK4-NEXT: [[DOTOFFLOAD_BASEPTRS27:%.*]] = alloca [1 x i8*], align 4 @@ -4625,96 +4537,79 @@ // CHECK4-NEXT: [[TMP97:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0 // CHECK4-NEXT: [[TMP98:%.*]] = bitcast i8** [[TMP97]] to i32* // CHECK4-NEXT: store i32 [[TMP88]], i32* [[TMP98]], align 4 -// CHECK4-NEXT: [[TMP99:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK4-NEXT: store i64 4, i64* [[TMP99]], align 4 -// CHECK4-NEXT: [[TMP100:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i32 0, i32 0 -// CHECK4-NEXT: store i8* null, i8** [[TMP100]], align 4 -// CHECK4-NEXT: [[TMP101:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 1 -// CHECK4-NEXT: [[TMP102:%.*]] = bitcast i8** [[TMP101]] to [10 x float]** -// CHECK4-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP102]], align 4 -// CHECK4-NEXT: [[TMP103:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 1 -// CHECK4-NEXT: [[TMP104:%.*]] = bitcast i8** [[TMP103]] to [10 x float]** -// CHECK4-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP104]], align 4 -// CHECK4-NEXT: [[TMP105:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK4-NEXT: store i64 40, i64* [[TMP105]], align 4 -// CHECK4-NEXT: [[TMP106:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i32 0, i32 1 -// CHECK4-NEXT: store i8* null, i8** [[TMP106]], align 4 -// CHECK4-NEXT: [[TMP107:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 2 +// CHECK4-NEXT: [[TMP99:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i32 0, i32 0 +// CHECK4-NEXT: store i8* null, i8** [[TMP99]], align 4 +// CHECK4-NEXT: [[TMP100:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 1 +// CHECK4-NEXT: [[TMP101:%.*]] = bitcast i8** [[TMP100]] to [10 x float]** +// CHECK4-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP101]], align 4 +// CHECK4-NEXT: [[TMP102:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 1 +// CHECK4-NEXT: [[TMP103:%.*]] = bitcast i8** [[TMP102]] to [10 x float]** +// CHECK4-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP103]], align 4 +// CHECK4-NEXT: [[TMP104:%.*]] = getelementptr 
inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i32 0, i32 1 +// CHECK4-NEXT: store i8* null, i8** [[TMP104]], align 4 +// CHECK4-NEXT: [[TMP105:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 2 +// CHECK4-NEXT: [[TMP106:%.*]] = bitcast i8** [[TMP105]] to i32* +// CHECK4-NEXT: store i32 [[TMP1]], i32* [[TMP106]], align 4 +// CHECK4-NEXT: [[TMP107:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 2 // CHECK4-NEXT: [[TMP108:%.*]] = bitcast i8** [[TMP107]] to i32* // CHECK4-NEXT: store i32 [[TMP1]], i32* [[TMP108]], align 4 -// CHECK4-NEXT: [[TMP109:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 2 -// CHECK4-NEXT: [[TMP110:%.*]] = bitcast i8** [[TMP109]] to i32* -// CHECK4-NEXT: store i32 [[TMP1]], i32* [[TMP110]], align 4 -// CHECK4-NEXT: [[TMP111:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK4-NEXT: store i64 4, i64* [[TMP111]], align 4 -// CHECK4-NEXT: [[TMP112:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i32 0, i32 2 -// CHECK4-NEXT: store i8* null, i8** [[TMP112]], align 4 -// CHECK4-NEXT: [[TMP113:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 3 -// CHECK4-NEXT: [[TMP114:%.*]] = bitcast i8** [[TMP113]] to float** -// CHECK4-NEXT: store float* [[VLA]], float** [[TMP114]], align 4 -// CHECK4-NEXT: [[TMP115:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 3 -// CHECK4-NEXT: [[TMP116:%.*]] = bitcast i8** [[TMP115]] to float** -// CHECK4-NEXT: store float* [[VLA]], float** [[TMP116]], align 4 -// CHECK4-NEXT: [[TMP117:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK4-NEXT: store i64 [[TMP91]], i64* [[TMP117]], align 4 -// CHECK4-NEXT: [[TMP118:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i32 0, i32 3 -// CHECK4-NEXT: store i8* null, i8** [[TMP118]], align 4 -// CHECK4-NEXT: [[TMP119:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 4 -// CHECK4-NEXT: [[TMP120:%.*]] = bitcast i8** [[TMP119]] to [5 x [10 x double]]** -// CHECK4-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP120]], align 4 -// CHECK4-NEXT: [[TMP121:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 4 -// CHECK4-NEXT: [[TMP122:%.*]] = bitcast i8** [[TMP121]] to [5 x [10 x double]]** -// CHECK4-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP122]], align 4 -// CHECK4-NEXT: [[TMP123:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK4-NEXT: store i64 400, i64* [[TMP123]], align 4 -// CHECK4-NEXT: [[TMP124:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i32 0, i32 4 +// CHECK4-NEXT: [[TMP109:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i32 0, i32 2 +// CHECK4-NEXT: store i8* null, i8** [[TMP109]], align 4 +// CHECK4-NEXT: [[TMP110:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 3 +// CHECK4-NEXT: [[TMP111:%.*]] = bitcast i8** [[TMP110]] to float** +// CHECK4-NEXT: store float* [[VLA]], float** [[TMP111]], align 4 +// CHECK4-NEXT: [[TMP112:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 3 +// CHECK4-NEXT: [[TMP113:%.*]] = bitcast i8** [[TMP112]] to float** 
+// CHECK4-NEXT: store float* [[VLA]], float** [[TMP113]], align 4 +// CHECK4-NEXT: store i64 [[TMP91]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_sizes.10, i32 0, i32 3), align 4 +// CHECK4-NEXT: [[TMP114:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i32 0, i32 3 +// CHECK4-NEXT: store i8* null, i8** [[TMP114]], align 4 +// CHECK4-NEXT: [[TMP115:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 4 +// CHECK4-NEXT: [[TMP116:%.*]] = bitcast i8** [[TMP115]] to [5 x [10 x double]]** +// CHECK4-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP116]], align 4 +// CHECK4-NEXT: [[TMP117:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 4 +// CHECK4-NEXT: [[TMP118:%.*]] = bitcast i8** [[TMP117]] to [5 x [10 x double]]** +// CHECK4-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP118]], align 4 +// CHECK4-NEXT: [[TMP119:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i32 0, i32 4 +// CHECK4-NEXT: store i8* null, i8** [[TMP119]], align 4 +// CHECK4-NEXT: [[TMP120:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 5 +// CHECK4-NEXT: [[TMP121:%.*]] = bitcast i8** [[TMP120]] to i32* +// CHECK4-NEXT: store i32 5, i32* [[TMP121]], align 4 +// CHECK4-NEXT: [[TMP122:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 5 +// CHECK4-NEXT: [[TMP123:%.*]] = bitcast i8** [[TMP122]] to i32* +// CHECK4-NEXT: store i32 5, i32* [[TMP123]], align 4 +// CHECK4-NEXT: [[TMP124:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i32 0, i32 5 // CHECK4-NEXT: store i8* null, i8** [[TMP124]], align 4 -// CHECK4-NEXT: [[TMP125:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 5 +// CHECK4-NEXT: [[TMP125:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 6 // CHECK4-NEXT: [[TMP126:%.*]] = bitcast i8** [[TMP125]] to i32* -// CHECK4-NEXT: store i32 5, i32* [[TMP126]], align 4 -// CHECK4-NEXT: [[TMP127:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 5 +// CHECK4-NEXT: store i32 [[TMP3]], i32* [[TMP126]], align 4 +// CHECK4-NEXT: [[TMP127:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 6 // CHECK4-NEXT: [[TMP128:%.*]] = bitcast i8** [[TMP127]] to i32* -// CHECK4-NEXT: store i32 5, i32* [[TMP128]], align 4 -// CHECK4-NEXT: [[TMP129:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5 -// CHECK4-NEXT: store i64 4, i64* [[TMP129]], align 4 -// CHECK4-NEXT: [[TMP130:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i32 0, i32 5 -// CHECK4-NEXT: store i8* null, i8** [[TMP130]], align 4 -// CHECK4-NEXT: [[TMP131:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 6 -// CHECK4-NEXT: [[TMP132:%.*]] = bitcast i8** [[TMP131]] to i32* -// CHECK4-NEXT: store i32 [[TMP3]], i32* [[TMP132]], align 4 -// CHECK4-NEXT: [[TMP133:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 6 -// CHECK4-NEXT: [[TMP134:%.*]] = bitcast i8** [[TMP133]] to i32* -// CHECK4-NEXT: store i32 [[TMP3]], i32* [[TMP134]], align 4 -// CHECK4-NEXT: [[TMP135:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6 -// CHECK4-NEXT: store i64 4, i64* [[TMP135]], 
align 4 -// CHECK4-NEXT: [[TMP136:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i32 0, i32 6 -// CHECK4-NEXT: store i8* null, i8** [[TMP136]], align 4 -// CHECK4-NEXT: [[TMP137:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 7 -// CHECK4-NEXT: [[TMP138:%.*]] = bitcast i8** [[TMP137]] to double** -// CHECK4-NEXT: store double* [[VLA1]], double** [[TMP138]], align 4 -// CHECK4-NEXT: [[TMP139:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 7 -// CHECK4-NEXT: [[TMP140:%.*]] = bitcast i8** [[TMP139]] to double** -// CHECK4-NEXT: store double* [[VLA1]], double** [[TMP140]], align 4 -// CHECK4-NEXT: [[TMP141:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7 -// CHECK4-NEXT: store i64 [[TMP94]], i64* [[TMP141]], align 4 -// CHECK4-NEXT: [[TMP142:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i32 0, i32 7 -// CHECK4-NEXT: store i8* null, i8** [[TMP142]], align 4 -// CHECK4-NEXT: [[TMP143:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 8 -// CHECK4-NEXT: [[TMP144:%.*]] = bitcast i8** [[TMP143]] to %struct.TT** -// CHECK4-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP144]], align 4 -// CHECK4-NEXT: [[TMP145:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 8 -// CHECK4-NEXT: [[TMP146:%.*]] = bitcast i8** [[TMP145]] to %struct.TT** -// CHECK4-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP146]], align 4 -// CHECK4-NEXT: [[TMP147:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 8 -// CHECK4-NEXT: store i64 12, i64* [[TMP147]], align 4 -// CHECK4-NEXT: [[TMP148:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i32 0, i32 8 -// CHECK4-NEXT: store i8* null, i8** [[TMP148]], align 4 -// CHECK4-NEXT: [[TMP149:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0 -// CHECK4-NEXT: [[TMP150:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0 -// CHECK4-NEXT: [[TMP151:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK4-NEXT: [[TMP152:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142.region_id, i32 9, i8** [[TMP149]], i8** [[TMP150]], i64* [[TMP151]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK4-NEXT: [[TMP153:%.*]] = icmp ne i32 [[TMP152]], 0 -// CHECK4-NEXT: br i1 [[TMP153]], label [[OMP_OFFLOAD_FAILED23:%.*]], label [[OMP_OFFLOAD_CONT24:%.*]] +// CHECK4-NEXT: store i32 [[TMP3]], i32* [[TMP128]], align 4 +// CHECK4-NEXT: [[TMP129:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i32 0, i32 6 +// CHECK4-NEXT: store i8* null, i8** [[TMP129]], align 4 +// CHECK4-NEXT: [[TMP130:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 7 +// CHECK4-NEXT: [[TMP131:%.*]] = bitcast i8** [[TMP130]] to double** +// CHECK4-NEXT: store double* [[VLA1]], double** [[TMP131]], align 4 +// CHECK4-NEXT: [[TMP132:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 7 +// CHECK4-NEXT: [[TMP133:%.*]] = bitcast i8** [[TMP132]] to double** +// CHECK4-NEXT: store double* [[VLA1]], double** [[TMP133]], align 4 
+// CHECK4-NEXT: store i64 [[TMP94]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_sizes.10, i32 0, i32 7), align 4 +// CHECK4-NEXT: [[TMP134:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i32 0, i32 7 +// CHECK4-NEXT: store i8* null, i8** [[TMP134]], align 4 +// CHECK4-NEXT: [[TMP135:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 8 +// CHECK4-NEXT: [[TMP136:%.*]] = bitcast i8** [[TMP135]] to %struct.TT** +// CHECK4-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP136]], align 4 +// CHECK4-NEXT: [[TMP137:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 8 +// CHECK4-NEXT: [[TMP138:%.*]] = bitcast i8** [[TMP137]] to %struct.TT** +// CHECK4-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP138]], align 4 +// CHECK4-NEXT: [[TMP139:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i32 0, i32 8 +// CHECK4-NEXT: store i8* null, i8** [[TMP139]], align 4 +// CHECK4-NEXT: [[TMP140:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0 +// CHECK4-NEXT: [[TMP141:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0 +// CHECK4-NEXT: [[TMP142:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142.region_id, i32 9, i8** [[TMP140]], i8** [[TMP141]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_sizes.10, i32 0, i32 0), i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_maptypes.11, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK4-NEXT: [[TMP143:%.*]] = icmp ne i32 [[TMP142]], 0 +// CHECK4-NEXT: br i1 [[TMP143]], label [[OMP_OFFLOAD_FAILED23:%.*]], label [[OMP_OFFLOAD_CONT24:%.*]] // CHECK4: omp_offload.failed23: // CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142(i32 [[TMP88]], [10 x float]* [[B]], i32 [[TMP1]], float* [[VLA]], [5 x [10 x double]]* [[C]], i32 5, i32 [[TMP3]], double* [[VLA1]], %struct.TT* [[D]]) #[[ATTR3]] // CHECK4-NEXT: br label [[OMP_OFFLOAD_CONT24]] @@ -4725,50 +4620,50 @@ // CHECK4-NEXT: br label [[OMP_IF_END26]] // CHECK4: omp_if.end26: // CHECK4-NEXT: store i32 0, i32* [[NN]], align 4 -// CHECK4-NEXT: [[TMP154:%.*]] = load i32, i32* [[NN]], align 4 -// CHECK4-NEXT: store i32 [[TMP154]], i32* [[NN_CASTED]], align 4 -// CHECK4-NEXT: [[TMP155:%.*]] = load i32, i32* [[NN_CASTED]], align 4 -// CHECK4-NEXT: [[TMP156:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS27]], i32 0, i32 0 -// CHECK4-NEXT: [[TMP157:%.*]] = bitcast i8** [[TMP156]] to i32* -// CHECK4-NEXT: store i32 [[TMP155]], i32* [[TMP157]], align 4 -// CHECK4-NEXT: [[TMP158:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS28]], i32 0, i32 0 -// CHECK4-NEXT: [[TMP159:%.*]] = bitcast i8** [[TMP158]] to i32* -// CHECK4-NEXT: store i32 [[TMP155]], i32* [[TMP159]], align 4 -// CHECK4-NEXT: [[TMP160:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS29]], i32 0, i32 0 -// CHECK4-NEXT: store i8* null, i8** [[TMP160]], align 4 -// CHECK4-NEXT: [[TMP161:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS27]], i32 0, i32 0 -// CHECK4-NEXT: [[TMP162:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS28]], i32 0, i32 0 -// CHECK4-NEXT: [[TMP163:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* 
@.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l154.region_id, i32 1, i8** [[TMP161]], i8** [[TMP162]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.13, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.14, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK4-NEXT: [[TMP164:%.*]] = icmp ne i32 [[TMP163]], 0 -// CHECK4-NEXT: br i1 [[TMP164]], label [[OMP_OFFLOAD_FAILED30:%.*]], label [[OMP_OFFLOAD_CONT31:%.*]] +// CHECK4-NEXT: [[TMP144:%.*]] = load i32, i32* [[NN]], align 4 +// CHECK4-NEXT: store i32 [[TMP144]], i32* [[NN_CASTED]], align 4 +// CHECK4-NEXT: [[TMP145:%.*]] = load i32, i32* [[NN_CASTED]], align 4 +// CHECK4-NEXT: [[TMP146:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS27]], i32 0, i32 0 +// CHECK4-NEXT: [[TMP147:%.*]] = bitcast i8** [[TMP146]] to i32* +// CHECK4-NEXT: store i32 [[TMP145]], i32* [[TMP147]], align 4 +// CHECK4-NEXT: [[TMP148:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS28]], i32 0, i32 0 +// CHECK4-NEXT: [[TMP149:%.*]] = bitcast i8** [[TMP148]] to i32* +// CHECK4-NEXT: store i32 [[TMP145]], i32* [[TMP149]], align 4 +// CHECK4-NEXT: [[TMP150:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS29]], i32 0, i32 0 +// CHECK4-NEXT: store i8* null, i8** [[TMP150]], align 4 +// CHECK4-NEXT: [[TMP151:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS27]], i32 0, i32 0 +// CHECK4-NEXT: [[TMP152:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS28]], i32 0, i32 0 +// CHECK4-NEXT: [[TMP153:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l154.region_id, i32 1, i8** [[TMP151]], i8** [[TMP152]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.14, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.15, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK4-NEXT: [[TMP154:%.*]] = icmp ne i32 [[TMP153]], 0 +// CHECK4-NEXT: br i1 [[TMP154]], label [[OMP_OFFLOAD_FAILED30:%.*]], label [[OMP_OFFLOAD_CONT31:%.*]] // CHECK4: omp_offload.failed30: -// CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l154(i32 [[TMP155]]) #[[ATTR3]] +// CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l154(i32 [[TMP145]]) #[[ATTR3]] // CHECK4-NEXT: br label [[OMP_OFFLOAD_CONT31]] // CHECK4: omp_offload.cont31: -// CHECK4-NEXT: [[TMP165:%.*]] = load i32, i32* [[NN]], align 4 -// CHECK4-NEXT: store i32 [[TMP165]], i32* [[NN_CASTED32]], align 4 -// CHECK4-NEXT: [[TMP166:%.*]] = load i32, i32* [[NN_CASTED32]], align 4 -// CHECK4-NEXT: [[TMP167:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 0 -// CHECK4-NEXT: [[TMP168:%.*]] = bitcast i8** [[TMP167]] to i32* -// CHECK4-NEXT: store i32 [[TMP166]], i32* [[TMP168]], align 4 -// CHECK4-NEXT: [[TMP169:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 0 -// CHECK4-NEXT: [[TMP170:%.*]] = bitcast i8** [[TMP169]] to i32* -// CHECK4-NEXT: store i32 [[TMP166]], i32* [[TMP170]], align 4 -// CHECK4-NEXT: [[TMP171:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS35]], i32 0, i32 0 -// CHECK4-NEXT: store i8* null, i8** [[TMP171]], align 4 -// CHECK4-NEXT: [[TMP172:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 0 -// CHECK4-NEXT: [[TMP173:%.*]] = 
getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 0 -// CHECK4-NEXT: [[TMP174:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l157.region_id, i32 1, i8** [[TMP172]], i8** [[TMP173]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.17, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.18, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK4-NEXT: [[TMP175:%.*]] = icmp ne i32 [[TMP174]], 0 -// CHECK4-NEXT: br i1 [[TMP175]], label [[OMP_OFFLOAD_FAILED36:%.*]], label [[OMP_OFFLOAD_CONT37:%.*]] +// CHECK4-NEXT: [[TMP155:%.*]] = load i32, i32* [[NN]], align 4 +// CHECK4-NEXT: store i32 [[TMP155]], i32* [[NN_CASTED32]], align 4 +// CHECK4-NEXT: [[TMP156:%.*]] = load i32, i32* [[NN_CASTED32]], align 4 +// CHECK4-NEXT: [[TMP157:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 0 +// CHECK4-NEXT: [[TMP158:%.*]] = bitcast i8** [[TMP157]] to i32* +// CHECK4-NEXT: store i32 [[TMP156]], i32* [[TMP158]], align 4 +// CHECK4-NEXT: [[TMP159:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 0 +// CHECK4-NEXT: [[TMP160:%.*]] = bitcast i8** [[TMP159]] to i32* +// CHECK4-NEXT: store i32 [[TMP156]], i32* [[TMP160]], align 4 +// CHECK4-NEXT: [[TMP161:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS35]], i32 0, i32 0 +// CHECK4-NEXT: store i8* null, i8** [[TMP161]], align 4 +// CHECK4-NEXT: [[TMP162:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 0 +// CHECK4-NEXT: [[TMP163:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 0 +// CHECK4-NEXT: [[TMP164:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l157.region_id, i32 1, i8** [[TMP162]], i8** [[TMP163]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.18, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.19, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK4-NEXT: [[TMP165:%.*]] = icmp ne i32 [[TMP164]], 0 +// CHECK4-NEXT: br i1 [[TMP165]], label [[OMP_OFFLOAD_FAILED36:%.*]], label [[OMP_OFFLOAD_CONT37:%.*]] // CHECK4: omp_offload.failed36: -// CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l157(i32 [[TMP166]]) #[[ATTR3]] +// CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l157(i32 [[TMP156]]) #[[ATTR3]] // CHECK4-NEXT: br label [[OMP_OFFLOAD_CONT37]] // CHECK4: omp_offload.cont37: -// CHECK4-NEXT: [[TMP176:%.*]] = load i32, i32* [[A]], align 4 -// CHECK4-NEXT: [[TMP177:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK4-NEXT: call void @llvm.stackrestore(i8* [[TMP177]]) -// CHECK4-NEXT: ret i32 [[TMP176]] +// CHECK4-NEXT: [[TMP166:%.*]] = load i32, i32* [[A]], align 4 +// CHECK4-NEXT: [[TMP167:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK4-NEXT: call void @llvm.stackrestore(i8* [[TMP167]]) +// CHECK4-NEXT: ret i32 [[TMP166]] // // // CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l101 @@ -5135,11 +5030,11 @@ // CHECK4-NEXT: [[TMP0:%.*]] = load i32, i32* [[NN_ADDR]], align 4 // CHECK4-NEXT: store i32 [[TMP0]], i32* [[NN_CASTED]], align 4 // CHECK4-NEXT: [[TMP1:%.*]] = load i32, i32* [[NN_CASTED]], align 4 -// CHECK4-NEXT: call void (%struct.ident_t*, i32, 
void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP1]]) +// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32)* @.omp_outlined..12 to void (i32*, i32*, ...)*), i32 [[TMP1]]) // CHECK4-NEXT: ret void // // -// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..12 // CHECK4-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[NN:%.*]]) #[[ATTR2]] { // CHECK4-NEXT: entry: // CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -5152,11 +5047,11 @@ // CHECK4-NEXT: [[TMP0:%.*]] = load i32, i32* [[NN_ADDR]], align 4 // CHECK4-NEXT: store i32 [[TMP0]], i32* [[NN_CASTED]], align 4 // CHECK4-NEXT: [[TMP1:%.*]] = load i32, i32* [[NN_CASTED]], align 4 -// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32)* @.omp_outlined..12 to void (i32*, i32*, ...)*), i32 [[TMP1]]) +// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32)* @.omp_outlined..13 to void (i32*, i32*, ...)*), i32 [[TMP1]]) // CHECK4-NEXT: ret void // // -// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..12 +// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..13 // CHECK4-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[NN:%.*]]) #[[ATTR2]] { // CHECK4-NEXT: entry: // CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -5177,11 +5072,11 @@ // CHECK4-NEXT: [[TMP0:%.*]] = load i32, i32* [[NN_ADDR]], align 4 // CHECK4-NEXT: store i32 [[TMP0]], i32* [[NN_CASTED]], align 4 // CHECK4-NEXT: [[TMP1:%.*]] = load i32, i32* [[NN_CASTED]], align 4 -// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i32 [[TMP1]]) +// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i32 [[TMP1]]) // CHECK4-NEXT: ret void // // -// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..15 +// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..16 // CHECK4-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[NN:%.*]]) #[[ATTR2]] { // CHECK4-NEXT: entry: // CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -5190,11 +5085,11 @@ // CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 // CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 // CHECK4-NEXT: store i32 [[NN]], i32* [[NN_ADDR]], align 4 -// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i32* [[NN_ADDR]]) +// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined..17 to void (i32*, i32*, ...)*), i32* [[NN_ADDR]]) // CHECK4-NEXT: ret void // // -// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..16 +// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..17 // CHECK4-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[NN:%.*]]) #[[ATTR2]] { // CHECK4-NEXT: entry: // CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -5228,7 +5123,7 @@ // CHECK4-NEXT: store i8* null, i8** [[TMP5]], align 4 // CHECK4-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK4-NEXT: [[TMP7:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK4-NEXT: [[TMP8:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z6bazzzziPi_l182.region_id, i32 1, i8** [[TMP6]], i8** [[TMP7]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.20, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.21, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK4-NEXT: [[TMP8:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z6bazzzziPi_l182.region_id, i32 1, i8** [[TMP6]], i8** [[TMP7]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.21, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.22, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK4-NEXT: [[TMP9:%.*]] = icmp ne i32 [[TMP8]], 0 // CHECK4-NEXT: br i1 [[TMP9]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK4: omp_offload.failed: @@ -5244,11 +5139,11 @@ // CHECK4-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4 // CHECK4-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4 // CHECK4-NEXT: [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4 -// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32)* @.omp_outlined..19 to void (i32*, i32*, ...)*), i32 [[TMP0]]) +// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32)* @.omp_outlined..20 to void (i32*, i32*, ...)*), i32 [[TMP0]]) // CHECK4-NEXT: ret void // // -// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..19 +// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..20 // CHECK4-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[VLA:%.*]]) #[[ATTR2]] { // CHECK4-NEXT: entry: // CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -5306,7 +5201,6 @@ // CHECK4-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 4 // CHECK4-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 4 // CHECK4-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 4 -// CHECK4-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 4 // CHECK4-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 // CHECK4-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 // CHECK4-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 @@ -5336,56 +5230,46 @@ // CHECK4-NEXT: [[TMP12:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK4-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to double** // CHECK4-NEXT: store double* [[A]], double** [[TMP13]], align 4 -// CHECK4-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK4-NEXT: store i64 8, i64* [[TMP14]], align 4 -// CHECK4-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK4-NEXT: store i8* null, i8** [[TMP15]], align 4 -// CHECK4-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK4-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32* -// CHECK4-NEXT: store i32 [[TMP5]], i32* [[TMP17]], align 4 -// CHECK4-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK4-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32* -// CHECK4-NEXT: store i32 [[TMP5]], i32* [[TMP19]], align 4 -// CHECK4-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK4-NEXT: store i64 4, i64* [[TMP20]], align 4 -// CHECK4-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK4-NEXT: store i8* null, i8** [[TMP21]], align 4 -// CHECK4-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK4-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK4-NEXT: store i8* null, i8** [[TMP14]], align 4 +// CHECK4-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK4-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i32* +// CHECK4-NEXT: store i32 [[TMP5]], i32* [[TMP16]], align 4 +// CHECK4-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK4-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32* +// CHECK4-NEXT: store i32 [[TMP5]], i32* [[TMP18]], align 4 +// CHECK4-NEXT: [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK4-NEXT: store i8* null, i8** [[TMP19]], align 4 +// CHECK4-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// 
CHECK4-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32* +// CHECK4-NEXT: store i32 2, i32* [[TMP21]], align 4 +// CHECK4-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK4-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i32* // CHECK4-NEXT: store i32 2, i32* [[TMP23]], align 4 -// CHECK4-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK4-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i32* -// CHECK4-NEXT: store i32 2, i32* [[TMP25]], align 4 -// CHECK4-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK4-NEXT: store i64 4, i64* [[TMP26]], align 4 -// CHECK4-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK4-NEXT: store i8* null, i8** [[TMP27]], align 4 -// CHECK4-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK4-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i32* -// CHECK4-NEXT: store i32 [[TMP1]], i32* [[TMP29]], align 4 -// CHECK4-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK4-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i32* -// CHECK4-NEXT: store i32 [[TMP1]], i32* [[TMP31]], align 4 -// CHECK4-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK4-NEXT: store i64 4, i64* [[TMP32]], align 4 -// CHECK4-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 -// CHECK4-NEXT: store i8* null, i8** [[TMP33]], align 4 -// CHECK4-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK4-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i16** -// CHECK4-NEXT: store i16* [[VLA]], i16** [[TMP35]], align 4 -// CHECK4-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK4-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i16** -// CHECK4-NEXT: store i16* [[VLA]], i16** [[TMP37]], align 4 -// CHECK4-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK4-NEXT: store i64 [[TMP9]], i64* [[TMP38]], align 4 -// CHECK4-NEXT: [[TMP39:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 -// CHECK4-NEXT: store i8* null, i8** [[TMP39]], align 4 -// CHECK4-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK4-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK4-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK4-NEXT: [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l227.region_id, i32 5, i8** [[TMP40]], i8** [[TMP41]], i64* [[TMP42]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.23, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK4-NEXT: [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0 -// CHECK4-NEXT: br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK4-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK4-NEXT: store 
i8* null, i8** [[TMP24]], align 4 +// CHECK4-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK4-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i32* +// CHECK4-NEXT: store i32 [[TMP1]], i32* [[TMP26]], align 4 +// CHECK4-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK4-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i32* +// CHECK4-NEXT: store i32 [[TMP1]], i32* [[TMP28]], align 4 +// CHECK4-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 +// CHECK4-NEXT: store i8* null, i8** [[TMP29]], align 4 +// CHECK4-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK4-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i16** +// CHECK4-NEXT: store i16* [[VLA]], i16** [[TMP31]], align 4 +// CHECK4-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK4-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i16** +// CHECK4-NEXT: store i16* [[VLA]], i16** [[TMP33]], align 4 +// CHECK4-NEXT: store i64 [[TMP9]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.24, i32 0, i32 4), align 4 +// CHECK4-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 +// CHECK4-NEXT: store i8* null, i8** [[TMP34]], align 4 +// CHECK4-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK4-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK4-NEXT: [[TMP37:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l227.region_id, i32 5, i8** [[TMP35]], i8** [[TMP36]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.24, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.25, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK4-NEXT: [[TMP38:%.*]] = icmp ne i32 [[TMP37]], 0 +// CHECK4-NEXT: br i1 [[TMP38]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK4: omp_offload.failed: // CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l227(%struct.S1* [[THIS1]], i32 [[TMP5]], i32 2, i32 [[TMP1]], i16* [[VLA]]) #[[ATTR3]] // CHECK4-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -5395,15 +5279,15 @@ // CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l227(%struct.S1* [[THIS1]], i32 [[TMP5]], i32 2, i32 [[TMP1]], i16* [[VLA]]) #[[ATTR3]] // CHECK4-NEXT: br label [[OMP_IF_END]] // CHECK4: omp_if.end: -// CHECK4-NEXT: [[TMP45:%.*]] = mul nsw i32 1, [[TMP1]] -// CHECK4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP45]] +// CHECK4-NEXT: [[TMP39:%.*]] = mul nsw i32 1, [[TMP1]] +// CHECK4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP39]] // CHECK4-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1 -// CHECK4-NEXT: [[TMP46:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2 -// CHECK4-NEXT: [[CONV:%.*]] = sext i16 [[TMP46]] to i32 -// CHECK4-NEXT: [[TMP47:%.*]] = load i32, i32* [[B]], align 4 -// CHECK4-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[TMP47]] -// CHECK4-NEXT: [[TMP48:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK4-NEXT: call void @llvm.stackrestore(i8* 
[[TMP48]]) +// CHECK4-NEXT: [[TMP40:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2 +// CHECK4-NEXT: [[CONV:%.*]] = sext i16 [[TMP40]] to i32 +// CHECK4-NEXT: [[TMP41:%.*]] = load i32, i32* [[B]], align 4 +// CHECK4-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[TMP41]] +// CHECK4-NEXT: [[TMP42:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK4-NEXT: call void @llvm.stackrestore(i8* [[TMP42]]) // CHECK4-NEXT: ret i32 [[ADD3]] // // @@ -5474,7 +5358,7 @@ // CHECK4-NEXT: store i8* null, i8** [[TMP26]], align 4 // CHECK4-NEXT: [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK4-NEXT: [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK4-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l209.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.25, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.26, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK4-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l209.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.27, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.28, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK4-NEXT: [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0 // CHECK4-NEXT: br i1 [[TMP30]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK4: omp_offload.failed: @@ -5542,7 +5426,7 @@ // CHECK4-NEXT: store i8* null, i8** [[TMP19]], align 4 // CHECK4-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK4-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK4-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l192.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.28, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.29, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK4-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l192.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.30, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.31, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK4-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0 // CHECK4-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK4: omp_offload.failed: @@ -5579,11 +5463,11 @@ // CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[B_ADDR]], align 4 // CHECK4-NEXT: store i32 [[TMP4]], i32* [[B_CASTED]], align 4 // CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[B_CASTED]], align 4 -// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*)* @.omp_outlined..22 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], i16* [[TMP3]]) +// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*)* @.omp_outlined..23 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], i16* [[TMP3]]) // CHECK4-NEXT: ret void // // -// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..22 +// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..23 // CHECK4-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i32 noundef [[B:%.*]], i32 noundef [[VLA:%.*]], i32 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR2]] { // CHECK4-NEXT: entry: // CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -5649,11 +5533,11 @@ // CHECK4-NEXT: [[CONV3:%.*]] = bitcast i32* [[AAA_CASTED]] to i8* // CHECK4-NEXT: store i8 [[TMP5]], i8* [[CONV3]], align 1 // CHECK4-NEXT: [[TMP6:%.*]] = load i32, i32* [[AAA_CASTED]], align 4 -// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, [10 x i32]*)* @.omp_outlined..24 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], [10 x i32]* [[TMP0]]) +// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, [10 x i32]*)* @.omp_outlined..26 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], [10 x i32]* [[TMP0]]) // CHECK4-NEXT: ret void // // -// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..24 +// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..26 // CHECK4-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], i32 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] { // CHECK4-NEXT: entry: // CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -5711,11 +5595,11 @@ // CHECK4-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16* // CHECK4-NEXT: store i16 [[TMP3]], i16* [[CONV1]], align 2 // CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4 -// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..27 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]]) +// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..29 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]]) // CHECK4-NEXT: ret void // // -// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..27 +// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..29 // CHECK4-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] { // CHECK4-NEXT: entry: // CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -7893,7 +7777,6 @@ // CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS25:%.*]] = alloca [9 x i8*], align 8 // CHECK17-NEXT: [[DOTOFFLOAD_PTRS26:%.*]] = alloca [9 x i8*], align 8 // CHECK17-NEXT: [[DOTOFFLOAD_MAPPERS27:%.*]] = alloca [9 x i8*], align 8 -// CHECK17-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [9 x i64], align 8 // CHECK17-NEXT: [[NN:%.*]] = alloca i32, align 4 // CHECK17-NEXT: [[NN_CASTED:%.*]] = alloca i64, align 8 // CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS33:%.*]] = alloca [1 x i8*], align 8 @@ -8078,96 +7961,79 @@ // CHECK17-NEXT: [[TMP97:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS26]], i32 0, i32 0 // CHECK17-NEXT: [[TMP98:%.*]] = bitcast i8** [[TMP97]] to i64* // CHECK17-NEXT: store i64 [[TMP90]], i64* [[TMP98]], align 8 -// CHECK17-NEXT: [[TMP99:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK17-NEXT: store i64 4, i64* [[TMP99]], align 8 -// CHECK17-NEXT: [[TMP100:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS27]], i64 0, i64 0 -// CHECK17-NEXT: store i8* null, i8** [[TMP100]], align 8 -// CHECK17-NEXT: [[TMP101:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS25]], i32 0, i32 1 -// CHECK17-NEXT: [[TMP102:%.*]] = bitcast i8** [[TMP101]] to [10 x float]** -// CHECK17-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP102]], align 8 -// CHECK17-NEXT: [[TMP103:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS26]], i32 0, i32 1 -// CHECK17-NEXT: [[TMP104:%.*]] = bitcast i8** [[TMP103]] to [10 x float]** -// CHECK17-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP104]], align 8 -// CHECK17-NEXT: [[TMP105:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK17-NEXT: store i64 40, i64* [[TMP105]], align 8 -// CHECK17-NEXT: [[TMP106:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS27]], i64 0, i64 1 -// CHECK17-NEXT: store i8* null, i8** [[TMP106]], align 8 -// CHECK17-NEXT: [[TMP107:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS25]], i32 0, i32 2 +// CHECK17-NEXT: [[TMP99:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS27]], i64 0, i64 0 +// CHECK17-NEXT: store i8* null, i8** [[TMP99]], align 8 +// CHECK17-NEXT: [[TMP100:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS25]], i32 0, i32 1 +// CHECK17-NEXT: [[TMP101:%.*]] = bitcast i8** [[TMP100]] to [10 x float]** +// CHECK17-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP101]], align 8 +// CHECK17-NEXT: [[TMP102:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS26]], i32 0, i32 1 +// CHECK17-NEXT: [[TMP103:%.*]] = bitcast i8** [[TMP102]] to [10 x float]** +// CHECK17-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP103]], align 8 +// 
CHECK17-NEXT: [[TMP104:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS27]], i64 0, i64 1 +// CHECK17-NEXT: store i8* null, i8** [[TMP104]], align 8 +// CHECK17-NEXT: [[TMP105:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS25]], i32 0, i32 2 +// CHECK17-NEXT: [[TMP106:%.*]] = bitcast i8** [[TMP105]] to i64* +// CHECK17-NEXT: store i64 [[TMP2]], i64* [[TMP106]], align 8 +// CHECK17-NEXT: [[TMP107:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS26]], i32 0, i32 2 // CHECK17-NEXT: [[TMP108:%.*]] = bitcast i8** [[TMP107]] to i64* // CHECK17-NEXT: store i64 [[TMP2]], i64* [[TMP108]], align 8 -// CHECK17-NEXT: [[TMP109:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS26]], i32 0, i32 2 -// CHECK17-NEXT: [[TMP110:%.*]] = bitcast i8** [[TMP109]] to i64* -// CHECK17-NEXT: store i64 [[TMP2]], i64* [[TMP110]], align 8 -// CHECK17-NEXT: [[TMP111:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK17-NEXT: store i64 8, i64* [[TMP111]], align 8 -// CHECK17-NEXT: [[TMP112:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS27]], i64 0, i64 2 -// CHECK17-NEXT: store i8* null, i8** [[TMP112]], align 8 -// CHECK17-NEXT: [[TMP113:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS25]], i32 0, i32 3 -// CHECK17-NEXT: [[TMP114:%.*]] = bitcast i8** [[TMP113]] to float** -// CHECK17-NEXT: store float* [[VLA]], float** [[TMP114]], align 8 -// CHECK17-NEXT: [[TMP115:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS26]], i32 0, i32 3 -// CHECK17-NEXT: [[TMP116:%.*]] = bitcast i8** [[TMP115]] to float** -// CHECK17-NEXT: store float* [[VLA]], float** [[TMP116]], align 8 -// CHECK17-NEXT: [[TMP117:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK17-NEXT: store i64 [[TMP92]], i64* [[TMP117]], align 8 -// CHECK17-NEXT: [[TMP118:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS27]], i64 0, i64 3 -// CHECK17-NEXT: store i8* null, i8** [[TMP118]], align 8 -// CHECK17-NEXT: [[TMP119:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS25]], i32 0, i32 4 -// CHECK17-NEXT: [[TMP120:%.*]] = bitcast i8** [[TMP119]] to [5 x [10 x double]]** -// CHECK17-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP120]], align 8 -// CHECK17-NEXT: [[TMP121:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS26]], i32 0, i32 4 -// CHECK17-NEXT: [[TMP122:%.*]] = bitcast i8** [[TMP121]] to [5 x [10 x double]]** -// CHECK17-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP122]], align 8 -// CHECK17-NEXT: [[TMP123:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK17-NEXT: store i64 400, i64* [[TMP123]], align 8 -// CHECK17-NEXT: [[TMP124:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS27]], i64 0, i64 4 +// CHECK17-NEXT: [[TMP109:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS27]], i64 0, i64 2 +// CHECK17-NEXT: store i8* null, i8** [[TMP109]], align 8 +// CHECK17-NEXT: [[TMP110:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS25]], i32 0, i32 3 +// CHECK17-NEXT: [[TMP111:%.*]] = bitcast i8** [[TMP110]] to float** +// CHECK17-NEXT: store float* [[VLA]], float** [[TMP111]], align 8 +// CHECK17-NEXT: [[TMP112:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS26]], 
i32 0, i32 3 +// CHECK17-NEXT: [[TMP113:%.*]] = bitcast i8** [[TMP112]] to float** +// CHECK17-NEXT: store float* [[VLA]], float** [[TMP113]], align 8 +// CHECK17-NEXT: store i64 [[TMP92]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_sizes.10, i32 0, i32 3), align 8 +// CHECK17-NEXT: [[TMP114:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS27]], i64 0, i64 3 +// CHECK17-NEXT: store i8* null, i8** [[TMP114]], align 8 +// CHECK17-NEXT: [[TMP115:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS25]], i32 0, i32 4 +// CHECK17-NEXT: [[TMP116:%.*]] = bitcast i8** [[TMP115]] to [5 x [10 x double]]** +// CHECK17-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP116]], align 8 +// CHECK17-NEXT: [[TMP117:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS26]], i32 0, i32 4 +// CHECK17-NEXT: [[TMP118:%.*]] = bitcast i8** [[TMP117]] to [5 x [10 x double]]** +// CHECK17-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP118]], align 8 +// CHECK17-NEXT: [[TMP119:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS27]], i64 0, i64 4 +// CHECK17-NEXT: store i8* null, i8** [[TMP119]], align 8 +// CHECK17-NEXT: [[TMP120:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS25]], i32 0, i32 5 +// CHECK17-NEXT: [[TMP121:%.*]] = bitcast i8** [[TMP120]] to i64* +// CHECK17-NEXT: store i64 5, i64* [[TMP121]], align 8 +// CHECK17-NEXT: [[TMP122:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS26]], i32 0, i32 5 +// CHECK17-NEXT: [[TMP123:%.*]] = bitcast i8** [[TMP122]] to i64* +// CHECK17-NEXT: store i64 5, i64* [[TMP123]], align 8 +// CHECK17-NEXT: [[TMP124:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS27]], i64 0, i64 5 // CHECK17-NEXT: store i8* null, i8** [[TMP124]], align 8 -// CHECK17-NEXT: [[TMP125:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS25]], i32 0, i32 5 +// CHECK17-NEXT: [[TMP125:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS25]], i32 0, i32 6 // CHECK17-NEXT: [[TMP126:%.*]] = bitcast i8** [[TMP125]] to i64* -// CHECK17-NEXT: store i64 5, i64* [[TMP126]], align 8 -// CHECK17-NEXT: [[TMP127:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS26]], i32 0, i32 5 +// CHECK17-NEXT: store i64 [[TMP5]], i64* [[TMP126]], align 8 +// CHECK17-NEXT: [[TMP127:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS26]], i32 0, i32 6 // CHECK17-NEXT: [[TMP128:%.*]] = bitcast i8** [[TMP127]] to i64* -// CHECK17-NEXT: store i64 5, i64* [[TMP128]], align 8 -// CHECK17-NEXT: [[TMP129:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5 -// CHECK17-NEXT: store i64 8, i64* [[TMP129]], align 8 -// CHECK17-NEXT: [[TMP130:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS27]], i64 0, i64 5 -// CHECK17-NEXT: store i8* null, i8** [[TMP130]], align 8 -// CHECK17-NEXT: [[TMP131:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS25]], i32 0, i32 6 -// CHECK17-NEXT: [[TMP132:%.*]] = bitcast i8** [[TMP131]] to i64* -// CHECK17-NEXT: store i64 [[TMP5]], i64* [[TMP132]], align 8 -// CHECK17-NEXT: [[TMP133:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS26]], i32 0, i32 6 -// CHECK17-NEXT: [[TMP134:%.*]] = bitcast i8** [[TMP133]] to i64* -// CHECK17-NEXT: store i64 [[TMP5]], i64* [[TMP134]], align 8 -// CHECK17-NEXT: [[TMP135:%.*]] = 
getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6 -// CHECK17-NEXT: store i64 8, i64* [[TMP135]], align 8 -// CHECK17-NEXT: [[TMP136:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS27]], i64 0, i64 6 -// CHECK17-NEXT: store i8* null, i8** [[TMP136]], align 8 -// CHECK17-NEXT: [[TMP137:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS25]], i32 0, i32 7 -// CHECK17-NEXT: [[TMP138:%.*]] = bitcast i8** [[TMP137]] to double** -// CHECK17-NEXT: store double* [[VLA1]], double** [[TMP138]], align 8 -// CHECK17-NEXT: [[TMP139:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS26]], i32 0, i32 7 -// CHECK17-NEXT: [[TMP140:%.*]] = bitcast i8** [[TMP139]] to double** -// CHECK17-NEXT: store double* [[VLA1]], double** [[TMP140]], align 8 -// CHECK17-NEXT: [[TMP141:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7 -// CHECK17-NEXT: store i64 [[TMP94]], i64* [[TMP141]], align 8 -// CHECK17-NEXT: [[TMP142:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS27]], i64 0, i64 7 -// CHECK17-NEXT: store i8* null, i8** [[TMP142]], align 8 -// CHECK17-NEXT: [[TMP143:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS25]], i32 0, i32 8 -// CHECK17-NEXT: [[TMP144:%.*]] = bitcast i8** [[TMP143]] to %struct.TT** -// CHECK17-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP144]], align 8 -// CHECK17-NEXT: [[TMP145:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS26]], i32 0, i32 8 -// CHECK17-NEXT: [[TMP146:%.*]] = bitcast i8** [[TMP145]] to %struct.TT** -// CHECK17-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP146]], align 8 -// CHECK17-NEXT: [[TMP147:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 8 -// CHECK17-NEXT: store i64 16, i64* [[TMP147]], align 8 -// CHECK17-NEXT: [[TMP148:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS27]], i64 0, i64 8 -// CHECK17-NEXT: store i8* null, i8** [[TMP148]], align 8 -// CHECK17-NEXT: [[TMP149:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS25]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP150:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS26]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP151:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP152:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142.region_id, i32 9, i8** [[TMP149]], i8** [[TMP150]], i64* [[TMP151]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK17-NEXT: [[TMP153:%.*]] = icmp ne i32 [[TMP152]], 0 -// CHECK17-NEXT: br i1 [[TMP153]], label [[OMP_OFFLOAD_FAILED28:%.*]], label [[OMP_OFFLOAD_CONT29:%.*]] +// CHECK17-NEXT: store i64 [[TMP5]], i64* [[TMP128]], align 8 +// CHECK17-NEXT: [[TMP129:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS27]], i64 0, i64 6 +// CHECK17-NEXT: store i8* null, i8** [[TMP129]], align 8 +// CHECK17-NEXT: [[TMP130:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS25]], i32 0, i32 7 +// CHECK17-NEXT: [[TMP131:%.*]] = bitcast i8** [[TMP130]] to double** +// CHECK17-NEXT: store double* [[VLA1]], double** [[TMP131]], align 8 +// CHECK17-NEXT: [[TMP132:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* 
[[DOTOFFLOAD_PTRS26]], i32 0, i32 7 +// CHECK17-NEXT: [[TMP133:%.*]] = bitcast i8** [[TMP132]] to double** +// CHECK17-NEXT: store double* [[VLA1]], double** [[TMP133]], align 8 +// CHECK17-NEXT: store i64 [[TMP94]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_sizes.10, i32 0, i32 7), align 8 +// CHECK17-NEXT: [[TMP134:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS27]], i64 0, i64 7 +// CHECK17-NEXT: store i8* null, i8** [[TMP134]], align 8 +// CHECK17-NEXT: [[TMP135:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS25]], i32 0, i32 8 +// CHECK17-NEXT: [[TMP136:%.*]] = bitcast i8** [[TMP135]] to %struct.TT** +// CHECK17-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP136]], align 8 +// CHECK17-NEXT: [[TMP137:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS26]], i32 0, i32 8 +// CHECK17-NEXT: [[TMP138:%.*]] = bitcast i8** [[TMP137]] to %struct.TT** +// CHECK17-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP138]], align 8 +// CHECK17-NEXT: [[TMP139:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS27]], i64 0, i64 8 +// CHECK17-NEXT: store i8* null, i8** [[TMP139]], align 8 +// CHECK17-NEXT: [[TMP140:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS25]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP141:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS26]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP142:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142.region_id, i32 9, i8** [[TMP140]], i8** [[TMP141]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_sizes.10, i32 0, i32 0), i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_maptypes.11, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK17-NEXT: [[TMP143:%.*]] = icmp ne i32 [[TMP142]], 0 +// CHECK17-NEXT: br i1 [[TMP143]], label [[OMP_OFFLOAD_FAILED28:%.*]], label [[OMP_OFFLOAD_CONT29:%.*]] // CHECK17: omp_offload.failed28: // CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142(i64 [[TMP90]], [10 x float]* [[B]], i64 [[TMP2]], float* [[VLA]], [5 x [10 x double]]* [[C]], i64 5, i64 [[TMP5]], double* [[VLA1]], %struct.TT* [[D]]) #[[ATTR3]] // CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT29]] @@ -8178,52 +8044,52 @@ // CHECK17-NEXT: br label [[OMP_IF_END31]] // CHECK17: omp_if.end31: // CHECK17-NEXT: store i32 0, i32* [[NN]], align 4 -// CHECK17-NEXT: [[TMP154:%.*]] = load i32, i32* [[NN]], align 4 +// CHECK17-NEXT: [[TMP144:%.*]] = load i32, i32* [[NN]], align 4 // CHECK17-NEXT: [[CONV32:%.*]] = bitcast i64* [[NN_CASTED]] to i32* -// CHECK17-NEXT: store i32 [[TMP154]], i32* [[CONV32]], align 4 -// CHECK17-NEXT: [[TMP155:%.*]] = load i64, i64* [[NN_CASTED]], align 8 -// CHECK17-NEXT: [[TMP156:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP157:%.*]] = bitcast i8** [[TMP156]] to i64* -// CHECK17-NEXT: store i64 [[TMP155]], i64* [[TMP157]], align 8 -// CHECK17-NEXT: [[TMP158:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP159:%.*]] = bitcast i8** [[TMP158]] to i64* -// CHECK17-NEXT: store i64 [[TMP155]], i64* [[TMP159]], align 8 -// CHECK17-NEXT: [[TMP160:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS35]], i64 0, i64 0 -// CHECK17-NEXT: store i8* null, i8** [[TMP160]], align 8 -// CHECK17-NEXT: 
[[TMP161:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP162:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP163:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l154.region_id, i32 1, i8** [[TMP161]], i8** [[TMP162]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.13, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.14, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK17-NEXT: [[TMP164:%.*]] = icmp ne i32 [[TMP163]], 0 -// CHECK17-NEXT: br i1 [[TMP164]], label [[OMP_OFFLOAD_FAILED36:%.*]], label [[OMP_OFFLOAD_CONT37:%.*]] +// CHECK17-NEXT: store i32 [[TMP144]], i32* [[CONV32]], align 4 +// CHECK17-NEXT: [[TMP145:%.*]] = load i64, i64* [[NN_CASTED]], align 8 +// CHECK17-NEXT: [[TMP146:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP147:%.*]] = bitcast i8** [[TMP146]] to i64* +// CHECK17-NEXT: store i64 [[TMP145]], i64* [[TMP147]], align 8 +// CHECK17-NEXT: [[TMP148:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP149:%.*]] = bitcast i8** [[TMP148]] to i64* +// CHECK17-NEXT: store i64 [[TMP145]], i64* [[TMP149]], align 8 +// CHECK17-NEXT: [[TMP150:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS35]], i64 0, i64 0 +// CHECK17-NEXT: store i8* null, i8** [[TMP150]], align 8 +// CHECK17-NEXT: [[TMP151:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP152:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP153:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l154.region_id, i32 1, i8** [[TMP151]], i8** [[TMP152]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.14, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.15, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK17-NEXT: [[TMP154:%.*]] = icmp ne i32 [[TMP153]], 0 +// CHECK17-NEXT: br i1 [[TMP154]], label [[OMP_OFFLOAD_FAILED36:%.*]], label [[OMP_OFFLOAD_CONT37:%.*]] // CHECK17: omp_offload.failed36: -// CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l154(i64 [[TMP155]]) #[[ATTR3]] +// CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l154(i64 [[TMP145]]) #[[ATTR3]] // CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT37]] // CHECK17: omp_offload.cont37: -// CHECK17-NEXT: [[TMP165:%.*]] = load i32, i32* [[NN]], align 4 +// CHECK17-NEXT: [[TMP155:%.*]] = load i32, i32* [[NN]], align 4 // CHECK17-NEXT: [[CONV39:%.*]] = bitcast i64* [[NN_CASTED38]] to i32* -// CHECK17-NEXT: store i32 [[TMP165]], i32* [[CONV39]], align 4 -// CHECK17-NEXT: [[TMP166:%.*]] = load i64, i64* [[NN_CASTED38]], align 8 -// CHECK17-NEXT: [[TMP167:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS40]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP168:%.*]] = bitcast i8** [[TMP167]] to i64* -// CHECK17-NEXT: store i64 [[TMP166]], i64* [[TMP168]], align 8 -// CHECK17-NEXT: [[TMP169:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS41]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP170:%.*]] = bitcast 
i8** [[TMP169]] to i64* -// CHECK17-NEXT: store i64 [[TMP166]], i64* [[TMP170]], align 8 -// CHECK17-NEXT: [[TMP171:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS42]], i64 0, i64 0 -// CHECK17-NEXT: store i8* null, i8** [[TMP171]], align 8 -// CHECK17-NEXT: [[TMP172:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS40]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP173:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS41]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP174:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l157.region_id, i32 1, i8** [[TMP172]], i8** [[TMP173]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.17, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.18, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK17-NEXT: [[TMP175:%.*]] = icmp ne i32 [[TMP174]], 0 -// CHECK17-NEXT: br i1 [[TMP175]], label [[OMP_OFFLOAD_FAILED43:%.*]], label [[OMP_OFFLOAD_CONT44:%.*]] +// CHECK17-NEXT: store i32 [[TMP155]], i32* [[CONV39]], align 4 +// CHECK17-NEXT: [[TMP156:%.*]] = load i64, i64* [[NN_CASTED38]], align 8 +// CHECK17-NEXT: [[TMP157:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS40]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP158:%.*]] = bitcast i8** [[TMP157]] to i64* +// CHECK17-NEXT: store i64 [[TMP156]], i64* [[TMP158]], align 8 +// CHECK17-NEXT: [[TMP159:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS41]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP160:%.*]] = bitcast i8** [[TMP159]] to i64* +// CHECK17-NEXT: store i64 [[TMP156]], i64* [[TMP160]], align 8 +// CHECK17-NEXT: [[TMP161:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS42]], i64 0, i64 0 +// CHECK17-NEXT: store i8* null, i8** [[TMP161]], align 8 +// CHECK17-NEXT: [[TMP162:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS40]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP163:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS41]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP164:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l157.region_id, i32 1, i8** [[TMP162]], i8** [[TMP163]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.18, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.19, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK17-NEXT: [[TMP165:%.*]] = icmp ne i32 [[TMP164]], 0 +// CHECK17-NEXT: br i1 [[TMP165]], label [[OMP_OFFLOAD_FAILED43:%.*]], label [[OMP_OFFLOAD_CONT44:%.*]] // CHECK17: omp_offload.failed43: -// CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l157(i64 [[TMP166]]) #[[ATTR3]] +// CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l157(i64 [[TMP156]]) #[[ATTR3]] // CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT44]] // CHECK17: omp_offload.cont44: -// CHECK17-NEXT: [[TMP176:%.*]] = load i32, i32* [[A]], align 4 -// CHECK17-NEXT: [[TMP177:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK17-NEXT: call void @llvm.stackrestore(i8* [[TMP177]]) -// CHECK17-NEXT: ret i32 [[TMP176]] +// CHECK17-NEXT: [[TMP166:%.*]] = load i32, i32* [[A]], align 4 +// CHECK17-NEXT: [[TMP167:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK17-NEXT: call void @llvm.stackrestore(i8* [[TMP167]]) +// CHECK17-NEXT: ret i32 
[[TMP166]] // // // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l101 @@ -8605,11 +8471,11 @@ // CHECK17-NEXT: [[CONV1:%.*]] = bitcast i64* [[NN_CASTED]] to i32* // CHECK17-NEXT: store i32 [[TMP0]], i32* [[CONV1]], align 4 // CHECK17-NEXT: [[TMP1:%.*]] = load i64, i64* [[NN_CASTED]], align 8 -// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP1]]) +// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined..12 to void (i32*, i32*, ...)*), i64 [[TMP1]]) // CHECK17-NEXT: ret void // // -// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..12 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[NN:%.*]]) #[[ATTR2]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -8624,11 +8490,11 @@ // CHECK17-NEXT: [[CONV1:%.*]] = bitcast i64* [[NN_CASTED]] to i32* // CHECK17-NEXT: store i32 [[TMP0]], i32* [[CONV1]], align 4 // CHECK17-NEXT: [[TMP1:%.*]] = load i64, i64* [[NN_CASTED]], align 8 -// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined..12 to void (i32*, i32*, ...)*), i64 [[TMP1]]) +// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined..13 to void (i32*, i32*, ...)*), i64 [[TMP1]]) // CHECK17-NEXT: ret void // // -// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..12 +// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..13 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[NN:%.*]]) #[[ATTR2]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -8652,11 +8518,11 @@ // CHECK17-NEXT: [[CONV1:%.*]] = bitcast i64* [[NN_CASTED]] to i32* // CHECK17-NEXT: store i32 [[TMP0]], i32* [[CONV1]], align 4 // CHECK17-NEXT: [[TMP1:%.*]] = load i64, i64* [[NN_CASTED]], align 8 -// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i64 [[TMP1]]) +// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i64 [[TMP1]]) // CHECK17-NEXT: ret void // // -// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..15 +// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..16 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[NN:%.*]]) #[[ATTR2]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -8666,11 +8532,11 @@ // CHECK17-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 // CHECK17-NEXT: store i64 [[NN]], i64* [[NN_ADDR]], align 8 // CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[NN_ADDR]] to i32* -// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i32* [[CONV]]) +// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined..17 to void (i32*, i32*, ...)*), i32* [[CONV]]) // CHECK17-NEXT: ret void // // -// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..16 +// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..17 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[NN:%.*]]) #[[ATTR2]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -8705,7 +8571,7 @@ // CHECK17-NEXT: store i8* null, i8** [[TMP6]], align 8 // CHECK17-NEXT: [[TMP7:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK17-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP9:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z6bazzzziPi_l182.region_id, i32 1, i8** [[TMP7]], i8** [[TMP8]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.20, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.21, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK17-NEXT: [[TMP9:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z6bazzzziPi_l182.region_id, i32 1, i8** [[TMP7]], i8** [[TMP8]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.21, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.22, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK17-NEXT: [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0 // CHECK17-NEXT: br i1 [[TMP10]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK17: omp_offload.failed: @@ -8721,11 +8587,11 @@ // CHECK17-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8 // CHECK17-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8 // CHECK17-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8 -// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined..19 to void (i32*, i32*, ...)*), i64 [[TMP0]]) +// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined..20 to void (i32*, i32*, ...)*), i64 [[TMP0]]) // CHECK17-NEXT: ret void // // -// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..19 +// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..20 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[VLA:%.*]]) #[[ATTR2]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -8783,7 +8649,6 @@ // CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 8 // CHECK17-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 8 // CHECK17-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 8 -// CHECK17-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 8 // CHECK17-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 // CHECK17-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 // CHECK17-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8 @@ -8814,56 +8679,46 @@ // CHECK17-NEXT: [[TMP12:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK17-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to double** // CHECK17-NEXT: store double* [[A]], double** [[TMP13]], align 8 -// CHECK17-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK17-NEXT: store i64 8, i64* [[TMP14]], align 8 -// CHECK17-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK17-NEXT: store i8* null, i8** [[TMP15]], align 8 -// CHECK17-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK17-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i64* -// CHECK17-NEXT: store i64 [[TMP6]], i64* [[TMP17]], align 8 -// CHECK17-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK17-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i64* -// CHECK17-NEXT: store i64 [[TMP6]], i64* [[TMP19]], align 8 -// CHECK17-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK17-NEXT: store i64 4, i64* [[TMP20]], align 8 -// CHECK17-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK17-NEXT: store i8* null, i8** [[TMP21]], align 8 -// CHECK17-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK17-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK17-NEXT: store i8* null, i8** [[TMP14]], align 8 +// CHECK17-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK17-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i64* +// CHECK17-NEXT: store i64 [[TMP6]], i64* [[TMP16]], align 8 +// CHECK17-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK17-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i64* +// CHECK17-NEXT: store i64 [[TMP6]], i64* [[TMP18]], align 
8 +// CHECK17-NEXT: [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK17-NEXT: store i8* null, i8** [[TMP19]], align 8 +// CHECK17-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK17-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i64* +// CHECK17-NEXT: store i64 2, i64* [[TMP21]], align 8 +// CHECK17-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK17-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i64* // CHECK17-NEXT: store i64 2, i64* [[TMP23]], align 8 -// CHECK17-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK17-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i64* -// CHECK17-NEXT: store i64 2, i64* [[TMP25]], align 8 -// CHECK17-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK17-NEXT: store i64 8, i64* [[TMP26]], align 8 -// CHECK17-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK17-NEXT: store i8* null, i8** [[TMP27]], align 8 -// CHECK17-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK17-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i64* -// CHECK17-NEXT: store i64 [[TMP2]], i64* [[TMP29]], align 8 -// CHECK17-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK17-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i64* -// CHECK17-NEXT: store i64 [[TMP2]], i64* [[TMP31]], align 8 -// CHECK17-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK17-NEXT: store i64 8, i64* [[TMP32]], align 8 -// CHECK17-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 -// CHECK17-NEXT: store i8* null, i8** [[TMP33]], align 8 -// CHECK17-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK17-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i16** -// CHECK17-NEXT: store i16* [[VLA]], i16** [[TMP35]], align 8 -// CHECK17-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK17-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i16** -// CHECK17-NEXT: store i16* [[VLA]], i16** [[TMP37]], align 8 -// CHECK17-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK17-NEXT: store i64 [[TMP9]], i64* [[TMP38]], align 8 -// CHECK17-NEXT: [[TMP39:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 -// CHECK17-NEXT: store i8* null, i8** [[TMP39]], align 8 -// CHECK17-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l227.region_id, i32 5, i8** [[TMP40]], i8** [[TMP41]], i64* [[TMP42]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.23, i32 0, i32 0), 
i8** null, i8** null, i32 0, i32 0) -// CHECK17-NEXT: [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0 -// CHECK17-NEXT: br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK17-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK17-NEXT: store i8* null, i8** [[TMP24]], align 8 +// CHECK17-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK17-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i64* +// CHECK17-NEXT: store i64 [[TMP2]], i64* [[TMP26]], align 8 +// CHECK17-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK17-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i64* +// CHECK17-NEXT: store i64 [[TMP2]], i64* [[TMP28]], align 8 +// CHECK17-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 +// CHECK17-NEXT: store i8* null, i8** [[TMP29]], align 8 +// CHECK17-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK17-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i16** +// CHECK17-NEXT: store i16* [[VLA]], i16** [[TMP31]], align 8 +// CHECK17-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK17-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i16** +// CHECK17-NEXT: store i16* [[VLA]], i16** [[TMP33]], align 8 +// CHECK17-NEXT: store i64 [[TMP9]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.24, i32 0, i32 4), align 8 +// CHECK17-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 +// CHECK17-NEXT: store i8* null, i8** [[TMP34]], align 8 +// CHECK17-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP37:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l227.region_id, i32 5, i8** [[TMP35]], i8** [[TMP36]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.24, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.25, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK17-NEXT: [[TMP38:%.*]] = icmp ne i32 [[TMP37]], 0 +// CHECK17-NEXT: br i1 [[TMP38]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK17: omp_offload.failed: // CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l227(%struct.S1* [[THIS1]], i64 [[TMP6]], i64 2, i64 [[TMP2]], i16* [[VLA]]) #[[ATTR3]] // CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -8873,15 +8728,15 @@ // CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l227(%struct.S1* [[THIS1]], i64 [[TMP6]], i64 2, i64 [[TMP2]], i16* [[VLA]]) #[[ATTR3]] // CHECK17-NEXT: br label [[OMP_IF_END]] // CHECK17: omp_if.end: -// CHECK17-NEXT: [[TMP45:%.*]] = mul nsw i64 1, [[TMP2]] -// CHECK17-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP45]] +// CHECK17-NEXT: [[TMP39:%.*]] = mul nsw i64 1, [[TMP2]] +// CHECK17-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP39]] // CHECK17-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1 -// 
CHECK17-NEXT: [[TMP46:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2 -// CHECK17-NEXT: [[CONV3:%.*]] = sext i16 [[TMP46]] to i32 -// CHECK17-NEXT: [[TMP47:%.*]] = load i32, i32* [[B]], align 4 -// CHECK17-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], [[TMP47]] -// CHECK17-NEXT: [[TMP48:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK17-NEXT: call void @llvm.stackrestore(i8* [[TMP48]]) +// CHECK17-NEXT: [[TMP40:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2 +// CHECK17-NEXT: [[CONV3:%.*]] = sext i16 [[TMP40]] to i32 +// CHECK17-NEXT: [[TMP41:%.*]] = load i32, i32* [[B]], align 4 +// CHECK17-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], [[TMP41]] +// CHECK17-NEXT: [[TMP42:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK17-NEXT: call void @llvm.stackrestore(i8* [[TMP42]]) // CHECK17-NEXT: ret i32 [[ADD4]] // // @@ -8953,7 +8808,7 @@ // CHECK17-NEXT: store i8* null, i8** [[TMP26]], align 8 // CHECK17-NEXT: [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK17-NEXT: [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l209.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.25, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.26, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK17-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l209.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.27, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.28, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK17-NEXT: [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0 // CHECK17-NEXT: br i1 [[TMP30]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK17: omp_offload.failed: @@ -9022,7 +8877,7 @@ // CHECK17-NEXT: store i8* null, i8** [[TMP19]], align 8 // CHECK17-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK17-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l192.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.28, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.29, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK17-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l192.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.30, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.31, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK17-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0 // CHECK17-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK17: 
omp_offload.failed: @@ -9061,11 +8916,11 @@ // CHECK17-NEXT: [[CONV3:%.*]] = bitcast i64* [[B_CASTED]] to i32* // CHECK17-NEXT: store i32 [[TMP4]], i32* [[CONV3]], align 4 // CHECK17-NEXT: [[TMP5:%.*]] = load i64, i64* [[B_CASTED]], align 8 -// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*)* @.omp_outlined..22 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], i16* [[TMP3]]) +// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*)* @.omp_outlined..23 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], i16* [[TMP3]]) // CHECK17-NEXT: ret void // // -// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..22 +// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..23 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i64 noundef [[B:%.*]], i64 noundef [[VLA:%.*]], i64 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR2]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -9134,11 +8989,11 @@ // CHECK17-NEXT: [[CONV5:%.*]] = bitcast i64* [[AAA_CASTED]] to i8* // CHECK17-NEXT: store i8 [[TMP5]], i8* [[CONV5]], align 1 // CHECK17-NEXT: [[TMP6:%.*]] = load i64, i64* [[AAA_CASTED]], align 8 -// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, [10 x i32]*)* @.omp_outlined..24 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], [10 x i32]* [[TMP0]]) +// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, [10 x i32]*)* @.omp_outlined..26 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], [10 x i32]* [[TMP0]]) // CHECK17-NEXT: ret void // // -// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..24 +// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..26 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], i64 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -9199,11 +9054,11 @@ // CHECK17-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16* // CHECK17-NEXT: store i16 [[TMP3]], i16* [[CONV3]], align 2 // CHECK17-NEXT: [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8 -// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..27 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]]) +// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..29 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]]) // CHECK17-NEXT: ret void // // -// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..27 +// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..29 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -9276,7 +9131,6 @@ // CHECK18-NEXT: [[DOTOFFLOAD_BASEPTRS25:%.*]] = alloca [9 x i8*], align 8 // CHECK18-NEXT: [[DOTOFFLOAD_PTRS26:%.*]] = alloca [9 x i8*], align 8 // CHECK18-NEXT: [[DOTOFFLOAD_MAPPERS27:%.*]] = alloca [9 x i8*], align 8 -// CHECK18-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [9 x i64], align 8 // CHECK18-NEXT: [[NN:%.*]] = alloca i32, align 4 // CHECK18-NEXT: [[NN_CASTED:%.*]] = alloca i64, align 8 // CHECK18-NEXT: [[DOTOFFLOAD_BASEPTRS33:%.*]] = alloca [1 x i8*], align 8 @@ -9461,96 +9315,79 @@ // CHECK18-NEXT: [[TMP97:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS26]], i32 0, i32 0 // CHECK18-NEXT: [[TMP98:%.*]] = bitcast i8** [[TMP97]] to i64* // CHECK18-NEXT: store i64 [[TMP90]], i64* [[TMP98]], align 8 -// CHECK18-NEXT: [[TMP99:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK18-NEXT: store i64 4, i64* [[TMP99]], align 8 -// CHECK18-NEXT: [[TMP100:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS27]], i64 0, i64 0 -// CHECK18-NEXT: store i8* null, i8** [[TMP100]], align 8 -// CHECK18-NEXT: [[TMP101:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS25]], i32 0, i32 1 -// CHECK18-NEXT: [[TMP102:%.*]] = bitcast i8** [[TMP101]] to [10 x float]** -// CHECK18-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP102]], align 8 -// CHECK18-NEXT: [[TMP103:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS26]], i32 0, i32 1 -// CHECK18-NEXT: [[TMP104:%.*]] = bitcast i8** [[TMP103]] to [10 x float]** -// CHECK18-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP104]], align 8 -// CHECK18-NEXT: [[TMP105:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK18-NEXT: store i64 40, i64* [[TMP105]], align 8 -// CHECK18-NEXT: [[TMP106:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS27]], i64 0, i64 1 -// CHECK18-NEXT: store i8* null, i8** [[TMP106]], align 8 -// CHECK18-NEXT: [[TMP107:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS25]], i32 0, i32 2 +// CHECK18-NEXT: [[TMP99:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS27]], i64 0, i64 0 +// CHECK18-NEXT: store i8* null, i8** [[TMP99]], align 8 +// CHECK18-NEXT: [[TMP100:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS25]], i32 0, i32 1 +// CHECK18-NEXT: [[TMP101:%.*]] = bitcast i8** [[TMP100]] to [10 x float]** +// CHECK18-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP101]], align 8 +// CHECK18-NEXT: [[TMP102:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS26]], i32 0, i32 1 +// CHECK18-NEXT: [[TMP103:%.*]] = bitcast i8** [[TMP102]] to [10 x float]** +// CHECK18-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP103]], align 8 +// 
CHECK18-NEXT: [[TMP104:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS27]], i64 0, i64 1 +// CHECK18-NEXT: store i8* null, i8** [[TMP104]], align 8 +// CHECK18-NEXT: [[TMP105:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS25]], i32 0, i32 2 +// CHECK18-NEXT: [[TMP106:%.*]] = bitcast i8** [[TMP105]] to i64* +// CHECK18-NEXT: store i64 [[TMP2]], i64* [[TMP106]], align 8 +// CHECK18-NEXT: [[TMP107:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS26]], i32 0, i32 2 // CHECK18-NEXT: [[TMP108:%.*]] = bitcast i8** [[TMP107]] to i64* // CHECK18-NEXT: store i64 [[TMP2]], i64* [[TMP108]], align 8 -// CHECK18-NEXT: [[TMP109:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS26]], i32 0, i32 2 -// CHECK18-NEXT: [[TMP110:%.*]] = bitcast i8** [[TMP109]] to i64* -// CHECK18-NEXT: store i64 [[TMP2]], i64* [[TMP110]], align 8 -// CHECK18-NEXT: [[TMP111:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK18-NEXT: store i64 8, i64* [[TMP111]], align 8 -// CHECK18-NEXT: [[TMP112:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS27]], i64 0, i64 2 -// CHECK18-NEXT: store i8* null, i8** [[TMP112]], align 8 -// CHECK18-NEXT: [[TMP113:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS25]], i32 0, i32 3 -// CHECK18-NEXT: [[TMP114:%.*]] = bitcast i8** [[TMP113]] to float** -// CHECK18-NEXT: store float* [[VLA]], float** [[TMP114]], align 8 -// CHECK18-NEXT: [[TMP115:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS26]], i32 0, i32 3 -// CHECK18-NEXT: [[TMP116:%.*]] = bitcast i8** [[TMP115]] to float** -// CHECK18-NEXT: store float* [[VLA]], float** [[TMP116]], align 8 -// CHECK18-NEXT: [[TMP117:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK18-NEXT: store i64 [[TMP92]], i64* [[TMP117]], align 8 -// CHECK18-NEXT: [[TMP118:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS27]], i64 0, i64 3 -// CHECK18-NEXT: store i8* null, i8** [[TMP118]], align 8 -// CHECK18-NEXT: [[TMP119:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS25]], i32 0, i32 4 -// CHECK18-NEXT: [[TMP120:%.*]] = bitcast i8** [[TMP119]] to [5 x [10 x double]]** -// CHECK18-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP120]], align 8 -// CHECK18-NEXT: [[TMP121:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS26]], i32 0, i32 4 -// CHECK18-NEXT: [[TMP122:%.*]] = bitcast i8** [[TMP121]] to [5 x [10 x double]]** -// CHECK18-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP122]], align 8 -// CHECK18-NEXT: [[TMP123:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK18-NEXT: store i64 400, i64* [[TMP123]], align 8 -// CHECK18-NEXT: [[TMP124:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS27]], i64 0, i64 4 +// CHECK18-NEXT: [[TMP109:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS27]], i64 0, i64 2 +// CHECK18-NEXT: store i8* null, i8** [[TMP109]], align 8 +// CHECK18-NEXT: [[TMP110:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS25]], i32 0, i32 3 +// CHECK18-NEXT: [[TMP111:%.*]] = bitcast i8** [[TMP110]] to float** +// CHECK18-NEXT: store float* [[VLA]], float** [[TMP111]], align 8 +// CHECK18-NEXT: [[TMP112:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS26]], 
i32 0, i32 3 +// CHECK18-NEXT: [[TMP113:%.*]] = bitcast i8** [[TMP112]] to float** +// CHECK18-NEXT: store float* [[VLA]], float** [[TMP113]], align 8 +// CHECK18-NEXT: store i64 [[TMP92]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_sizes.10, i32 0, i32 3), align 8 +// CHECK18-NEXT: [[TMP114:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS27]], i64 0, i64 3 +// CHECK18-NEXT: store i8* null, i8** [[TMP114]], align 8 +// CHECK18-NEXT: [[TMP115:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS25]], i32 0, i32 4 +// CHECK18-NEXT: [[TMP116:%.*]] = bitcast i8** [[TMP115]] to [5 x [10 x double]]** +// CHECK18-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP116]], align 8 +// CHECK18-NEXT: [[TMP117:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS26]], i32 0, i32 4 +// CHECK18-NEXT: [[TMP118:%.*]] = bitcast i8** [[TMP117]] to [5 x [10 x double]]** +// CHECK18-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP118]], align 8 +// CHECK18-NEXT: [[TMP119:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS27]], i64 0, i64 4 +// CHECK18-NEXT: store i8* null, i8** [[TMP119]], align 8 +// CHECK18-NEXT: [[TMP120:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS25]], i32 0, i32 5 +// CHECK18-NEXT: [[TMP121:%.*]] = bitcast i8** [[TMP120]] to i64* +// CHECK18-NEXT: store i64 5, i64* [[TMP121]], align 8 +// CHECK18-NEXT: [[TMP122:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS26]], i32 0, i32 5 +// CHECK18-NEXT: [[TMP123:%.*]] = bitcast i8** [[TMP122]] to i64* +// CHECK18-NEXT: store i64 5, i64* [[TMP123]], align 8 +// CHECK18-NEXT: [[TMP124:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS27]], i64 0, i64 5 // CHECK18-NEXT: store i8* null, i8** [[TMP124]], align 8 -// CHECK18-NEXT: [[TMP125:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS25]], i32 0, i32 5 +// CHECK18-NEXT: [[TMP125:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS25]], i32 0, i32 6 // CHECK18-NEXT: [[TMP126:%.*]] = bitcast i8** [[TMP125]] to i64* -// CHECK18-NEXT: store i64 5, i64* [[TMP126]], align 8 -// CHECK18-NEXT: [[TMP127:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS26]], i32 0, i32 5 +// CHECK18-NEXT: store i64 [[TMP5]], i64* [[TMP126]], align 8 +// CHECK18-NEXT: [[TMP127:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS26]], i32 0, i32 6 // CHECK18-NEXT: [[TMP128:%.*]] = bitcast i8** [[TMP127]] to i64* -// CHECK18-NEXT: store i64 5, i64* [[TMP128]], align 8 -// CHECK18-NEXT: [[TMP129:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5 -// CHECK18-NEXT: store i64 8, i64* [[TMP129]], align 8 -// CHECK18-NEXT: [[TMP130:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS27]], i64 0, i64 5 -// CHECK18-NEXT: store i8* null, i8** [[TMP130]], align 8 -// CHECK18-NEXT: [[TMP131:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS25]], i32 0, i32 6 -// CHECK18-NEXT: [[TMP132:%.*]] = bitcast i8** [[TMP131]] to i64* -// CHECK18-NEXT: store i64 [[TMP5]], i64* [[TMP132]], align 8 -// CHECK18-NEXT: [[TMP133:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS26]], i32 0, i32 6 -// CHECK18-NEXT: [[TMP134:%.*]] = bitcast i8** [[TMP133]] to i64* -// CHECK18-NEXT: store i64 [[TMP5]], i64* [[TMP134]], align 8 -// CHECK18-NEXT: [[TMP135:%.*]] = 
getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6 -// CHECK18-NEXT: store i64 8, i64* [[TMP135]], align 8 -// CHECK18-NEXT: [[TMP136:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS27]], i64 0, i64 6 -// CHECK18-NEXT: store i8* null, i8** [[TMP136]], align 8 -// CHECK18-NEXT: [[TMP137:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS25]], i32 0, i32 7 -// CHECK18-NEXT: [[TMP138:%.*]] = bitcast i8** [[TMP137]] to double** -// CHECK18-NEXT: store double* [[VLA1]], double** [[TMP138]], align 8 -// CHECK18-NEXT: [[TMP139:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS26]], i32 0, i32 7 -// CHECK18-NEXT: [[TMP140:%.*]] = bitcast i8** [[TMP139]] to double** -// CHECK18-NEXT: store double* [[VLA1]], double** [[TMP140]], align 8 -// CHECK18-NEXT: [[TMP141:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7 -// CHECK18-NEXT: store i64 [[TMP94]], i64* [[TMP141]], align 8 -// CHECK18-NEXT: [[TMP142:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS27]], i64 0, i64 7 -// CHECK18-NEXT: store i8* null, i8** [[TMP142]], align 8 -// CHECK18-NEXT: [[TMP143:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS25]], i32 0, i32 8 -// CHECK18-NEXT: [[TMP144:%.*]] = bitcast i8** [[TMP143]] to %struct.TT** -// CHECK18-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP144]], align 8 -// CHECK18-NEXT: [[TMP145:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS26]], i32 0, i32 8 -// CHECK18-NEXT: [[TMP146:%.*]] = bitcast i8** [[TMP145]] to %struct.TT** -// CHECK18-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP146]], align 8 -// CHECK18-NEXT: [[TMP147:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 8 -// CHECK18-NEXT: store i64 16, i64* [[TMP147]], align 8 -// CHECK18-NEXT: [[TMP148:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS27]], i64 0, i64 8 -// CHECK18-NEXT: store i8* null, i8** [[TMP148]], align 8 -// CHECK18-NEXT: [[TMP149:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS25]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP150:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS26]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP151:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP152:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142.region_id, i32 9, i8** [[TMP149]], i8** [[TMP150]], i64* [[TMP151]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK18-NEXT: [[TMP153:%.*]] = icmp ne i32 [[TMP152]], 0 -// CHECK18-NEXT: br i1 [[TMP153]], label [[OMP_OFFLOAD_FAILED28:%.*]], label [[OMP_OFFLOAD_CONT29:%.*]] +// CHECK18-NEXT: store i64 [[TMP5]], i64* [[TMP128]], align 8 +// CHECK18-NEXT: [[TMP129:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS27]], i64 0, i64 6 +// CHECK18-NEXT: store i8* null, i8** [[TMP129]], align 8 +// CHECK18-NEXT: [[TMP130:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS25]], i32 0, i32 7 +// CHECK18-NEXT: [[TMP131:%.*]] = bitcast i8** [[TMP130]] to double** +// CHECK18-NEXT: store double* [[VLA1]], double** [[TMP131]], align 8 +// CHECK18-NEXT: [[TMP132:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* 
[[DOTOFFLOAD_PTRS26]], i32 0, i32 7 +// CHECK18-NEXT: [[TMP133:%.*]] = bitcast i8** [[TMP132]] to double** +// CHECK18-NEXT: store double* [[VLA1]], double** [[TMP133]], align 8 +// CHECK18-NEXT: store i64 [[TMP94]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_sizes.10, i32 0, i32 7), align 8 +// CHECK18-NEXT: [[TMP134:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS27]], i64 0, i64 7 +// CHECK18-NEXT: store i8* null, i8** [[TMP134]], align 8 +// CHECK18-NEXT: [[TMP135:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS25]], i32 0, i32 8 +// CHECK18-NEXT: [[TMP136:%.*]] = bitcast i8** [[TMP135]] to %struct.TT** +// CHECK18-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP136]], align 8 +// CHECK18-NEXT: [[TMP137:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS26]], i32 0, i32 8 +// CHECK18-NEXT: [[TMP138:%.*]] = bitcast i8** [[TMP137]] to %struct.TT** +// CHECK18-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP138]], align 8 +// CHECK18-NEXT: [[TMP139:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS27]], i64 0, i64 8 +// CHECK18-NEXT: store i8* null, i8** [[TMP139]], align 8 +// CHECK18-NEXT: [[TMP140:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS25]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP141:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS26]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP142:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142.region_id, i32 9, i8** [[TMP140]], i8** [[TMP141]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_sizes.10, i32 0, i32 0), i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_maptypes.11, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK18-NEXT: [[TMP143:%.*]] = icmp ne i32 [[TMP142]], 0 +// CHECK18-NEXT: br i1 [[TMP143]], label [[OMP_OFFLOAD_FAILED28:%.*]], label [[OMP_OFFLOAD_CONT29:%.*]] // CHECK18: omp_offload.failed28: // CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142(i64 [[TMP90]], [10 x float]* [[B]], i64 [[TMP2]], float* [[VLA]], [5 x [10 x double]]* [[C]], i64 5, i64 [[TMP5]], double* [[VLA1]], %struct.TT* [[D]]) #[[ATTR3]] // CHECK18-NEXT: br label [[OMP_OFFLOAD_CONT29]] @@ -9561,52 +9398,52 @@ // CHECK18-NEXT: br label [[OMP_IF_END31]] // CHECK18: omp_if.end31: // CHECK18-NEXT: store i32 0, i32* [[NN]], align 4 -// CHECK18-NEXT: [[TMP154:%.*]] = load i32, i32* [[NN]], align 4 +// CHECK18-NEXT: [[TMP144:%.*]] = load i32, i32* [[NN]], align 4 // CHECK18-NEXT: [[CONV32:%.*]] = bitcast i64* [[NN_CASTED]] to i32* -// CHECK18-NEXT: store i32 [[TMP154]], i32* [[CONV32]], align 4 -// CHECK18-NEXT: [[TMP155:%.*]] = load i64, i64* [[NN_CASTED]], align 8 -// CHECK18-NEXT: [[TMP156:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP157:%.*]] = bitcast i8** [[TMP156]] to i64* -// CHECK18-NEXT: store i64 [[TMP155]], i64* [[TMP157]], align 8 -// CHECK18-NEXT: [[TMP158:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP159:%.*]] = bitcast i8** [[TMP158]] to i64* -// CHECK18-NEXT: store i64 [[TMP155]], i64* [[TMP159]], align 8 -// CHECK18-NEXT: [[TMP160:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS35]], i64 0, i64 0 -// CHECK18-NEXT: store i8* null, i8** [[TMP160]], align 8 -// CHECK18-NEXT: 
[[TMP161:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP162:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP163:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l154.region_id, i32 1, i8** [[TMP161]], i8** [[TMP162]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.13, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.14, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK18-NEXT: [[TMP164:%.*]] = icmp ne i32 [[TMP163]], 0 -// CHECK18-NEXT: br i1 [[TMP164]], label [[OMP_OFFLOAD_FAILED36:%.*]], label [[OMP_OFFLOAD_CONT37:%.*]] +// CHECK18-NEXT: store i32 [[TMP144]], i32* [[CONV32]], align 4 +// CHECK18-NEXT: [[TMP145:%.*]] = load i64, i64* [[NN_CASTED]], align 8 +// CHECK18-NEXT: [[TMP146:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP147:%.*]] = bitcast i8** [[TMP146]] to i64* +// CHECK18-NEXT: store i64 [[TMP145]], i64* [[TMP147]], align 8 +// CHECK18-NEXT: [[TMP148:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP149:%.*]] = bitcast i8** [[TMP148]] to i64* +// CHECK18-NEXT: store i64 [[TMP145]], i64* [[TMP149]], align 8 +// CHECK18-NEXT: [[TMP150:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS35]], i64 0, i64 0 +// CHECK18-NEXT: store i8* null, i8** [[TMP150]], align 8 +// CHECK18-NEXT: [[TMP151:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP152:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP153:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l154.region_id, i32 1, i8** [[TMP151]], i8** [[TMP152]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.14, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.15, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK18-NEXT: [[TMP154:%.*]] = icmp ne i32 [[TMP153]], 0 +// CHECK18-NEXT: br i1 [[TMP154]], label [[OMP_OFFLOAD_FAILED36:%.*]], label [[OMP_OFFLOAD_CONT37:%.*]] // CHECK18: omp_offload.failed36: -// CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l154(i64 [[TMP155]]) #[[ATTR3]] +// CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l154(i64 [[TMP145]]) #[[ATTR3]] // CHECK18-NEXT: br label [[OMP_OFFLOAD_CONT37]] // CHECK18: omp_offload.cont37: -// CHECK18-NEXT: [[TMP165:%.*]] = load i32, i32* [[NN]], align 4 +// CHECK18-NEXT: [[TMP155:%.*]] = load i32, i32* [[NN]], align 4 // CHECK18-NEXT: [[CONV39:%.*]] = bitcast i64* [[NN_CASTED38]] to i32* -// CHECK18-NEXT: store i32 [[TMP165]], i32* [[CONV39]], align 4 -// CHECK18-NEXT: [[TMP166:%.*]] = load i64, i64* [[NN_CASTED38]], align 8 -// CHECK18-NEXT: [[TMP167:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS40]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP168:%.*]] = bitcast i8** [[TMP167]] to i64* -// CHECK18-NEXT: store i64 [[TMP166]], i64* [[TMP168]], align 8 -// CHECK18-NEXT: [[TMP169:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS41]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP170:%.*]] = bitcast 
i8** [[TMP169]] to i64* -// CHECK18-NEXT: store i64 [[TMP166]], i64* [[TMP170]], align 8 -// CHECK18-NEXT: [[TMP171:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS42]], i64 0, i64 0 -// CHECK18-NEXT: store i8* null, i8** [[TMP171]], align 8 -// CHECK18-NEXT: [[TMP172:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS40]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP173:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS41]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP174:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l157.region_id, i32 1, i8** [[TMP172]], i8** [[TMP173]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.17, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.18, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK18-NEXT: [[TMP175:%.*]] = icmp ne i32 [[TMP174]], 0 -// CHECK18-NEXT: br i1 [[TMP175]], label [[OMP_OFFLOAD_FAILED43:%.*]], label [[OMP_OFFLOAD_CONT44:%.*]] +// CHECK18-NEXT: store i32 [[TMP155]], i32* [[CONV39]], align 4 +// CHECK18-NEXT: [[TMP156:%.*]] = load i64, i64* [[NN_CASTED38]], align 8 +// CHECK18-NEXT: [[TMP157:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS40]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP158:%.*]] = bitcast i8** [[TMP157]] to i64* +// CHECK18-NEXT: store i64 [[TMP156]], i64* [[TMP158]], align 8 +// CHECK18-NEXT: [[TMP159:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS41]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP160:%.*]] = bitcast i8** [[TMP159]] to i64* +// CHECK18-NEXT: store i64 [[TMP156]], i64* [[TMP160]], align 8 +// CHECK18-NEXT: [[TMP161:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS42]], i64 0, i64 0 +// CHECK18-NEXT: store i8* null, i8** [[TMP161]], align 8 +// CHECK18-NEXT: [[TMP162:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS40]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP163:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS41]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP164:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l157.region_id, i32 1, i8** [[TMP162]], i8** [[TMP163]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.18, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.19, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK18-NEXT: [[TMP165:%.*]] = icmp ne i32 [[TMP164]], 0 +// CHECK18-NEXT: br i1 [[TMP165]], label [[OMP_OFFLOAD_FAILED43:%.*]], label [[OMP_OFFLOAD_CONT44:%.*]] // CHECK18: omp_offload.failed43: -// CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l157(i64 [[TMP166]]) #[[ATTR3]] +// CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l157(i64 [[TMP156]]) #[[ATTR3]] // CHECK18-NEXT: br label [[OMP_OFFLOAD_CONT44]] // CHECK18: omp_offload.cont44: -// CHECK18-NEXT: [[TMP176:%.*]] = load i32, i32* [[A]], align 4 -// CHECK18-NEXT: [[TMP177:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK18-NEXT: call void @llvm.stackrestore(i8* [[TMP177]]) -// CHECK18-NEXT: ret i32 [[TMP176]] +// CHECK18-NEXT: [[TMP166:%.*]] = load i32, i32* [[A]], align 4 +// CHECK18-NEXT: [[TMP167:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK18-NEXT: call void @llvm.stackrestore(i8* [[TMP167]]) +// CHECK18-NEXT: ret i32 
[[TMP166]] // // // CHECK18-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l101 @@ -9988,11 +9825,11 @@ // CHECK18-NEXT: [[CONV1:%.*]] = bitcast i64* [[NN_CASTED]] to i32* // CHECK18-NEXT: store i32 [[TMP0]], i32* [[CONV1]], align 4 // CHECK18-NEXT: [[TMP1:%.*]] = load i64, i64* [[NN_CASTED]], align 8 -// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP1]]) +// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined..12 to void (i32*, i32*, ...)*), i64 [[TMP1]]) // CHECK18-NEXT: ret void // // -// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..12 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[NN:%.*]]) #[[ATTR2]] { // CHECK18-NEXT: entry: // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -10007,11 +9844,11 @@ // CHECK18-NEXT: [[CONV1:%.*]] = bitcast i64* [[NN_CASTED]] to i32* // CHECK18-NEXT: store i32 [[TMP0]], i32* [[CONV1]], align 4 // CHECK18-NEXT: [[TMP1:%.*]] = load i64, i64* [[NN_CASTED]], align 8 -// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined..12 to void (i32*, i32*, ...)*), i64 [[TMP1]]) +// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined..13 to void (i32*, i32*, ...)*), i64 [[TMP1]]) // CHECK18-NEXT: ret void // // -// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..12 +// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..13 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[NN:%.*]]) #[[ATTR2]] { // CHECK18-NEXT: entry: // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -10035,11 +9872,11 @@ // CHECK18-NEXT: [[CONV1:%.*]] = bitcast i64* [[NN_CASTED]] to i32* // CHECK18-NEXT: store i32 [[TMP0]], i32* [[CONV1]], align 4 // CHECK18-NEXT: [[TMP1:%.*]] = load i64, i64* [[NN_CASTED]], align 8 -// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i64 [[TMP1]]) +// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i64 [[TMP1]]) // CHECK18-NEXT: ret void // // -// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..15 +// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..16 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[NN:%.*]]) #[[ATTR2]] { // CHECK18-NEXT: entry: // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -10049,11 +9886,11 @@ // CHECK18-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 // CHECK18-NEXT: store i64 [[NN]], i64* [[NN_ADDR]], align 8 // CHECK18-NEXT: [[CONV:%.*]] = bitcast i64* [[NN_ADDR]] to i32* -// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i32* [[CONV]]) +// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined..17 to void (i32*, i32*, ...)*), i32* [[CONV]]) // CHECK18-NEXT: ret void // // -// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..16 +// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..17 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[NN:%.*]]) #[[ATTR2]] { // CHECK18-NEXT: entry: // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -10088,7 +9925,7 @@ // CHECK18-NEXT: store i8* null, i8** [[TMP6]], align 8 // CHECK18-NEXT: [[TMP7:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK18-NEXT: [[TMP8:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP9:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z6bazzzziPi_l182.region_id, i32 1, i8** [[TMP7]], i8** [[TMP8]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.20, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.21, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK18-NEXT: [[TMP9:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z6bazzzziPi_l182.region_id, i32 1, i8** [[TMP7]], i8** [[TMP8]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.21, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.22, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK18-NEXT: [[TMP10:%.*]] = icmp ne i32 [[TMP9]], 0 // CHECK18-NEXT: br i1 [[TMP10]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK18: omp_offload.failed: @@ -10104,11 +9941,11 @@ // CHECK18-NEXT: [[VLA_ADDR:%.*]] = alloca i64, align 8 // CHECK18-NEXT: store i64 [[VLA]], i64* [[VLA_ADDR]], align 8 // CHECK18-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8 -// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined..19 to void (i32*, i32*, ...)*), i64 [[TMP0]]) +// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64)* @.omp_outlined..20 to void (i32*, i32*, ...)*), i64 [[TMP0]]) // CHECK18-NEXT: ret void // // -// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..19 +// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..20 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[VLA:%.*]]) #[[ATTR2]] { // CHECK18-NEXT: entry: // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -10166,7 +10003,6 @@ // CHECK18-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 8 // CHECK18-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 8 // CHECK18-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 8 -// CHECK18-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 8 // CHECK18-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 // CHECK18-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 // CHECK18-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 8 @@ -10197,56 +10033,46 @@ // CHECK18-NEXT: [[TMP12:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK18-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to double** // CHECK18-NEXT: store double* [[A]], double** [[TMP13]], align 8 -// CHECK18-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK18-NEXT: store i64 8, i64* [[TMP14]], align 8 -// CHECK18-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK18-NEXT: store i8* null, i8** [[TMP15]], align 8 -// CHECK18-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK18-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i64* -// CHECK18-NEXT: store i64 [[TMP6]], i64* [[TMP17]], align 8 -// CHECK18-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK18-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i64* -// CHECK18-NEXT: store i64 [[TMP6]], i64* [[TMP19]], align 8 -// CHECK18-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK18-NEXT: store i64 4, i64* [[TMP20]], align 8 -// CHECK18-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK18-NEXT: store i8* null, i8** [[TMP21]], align 8 -// CHECK18-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK18-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK18-NEXT: store i8* null, i8** [[TMP14]], align 8 +// CHECK18-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK18-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i64* +// CHECK18-NEXT: store i64 [[TMP6]], i64* [[TMP16]], align 8 +// CHECK18-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK18-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i64* +// CHECK18-NEXT: store i64 [[TMP6]], i64* [[TMP18]], 
align 8 +// CHECK18-NEXT: [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK18-NEXT: store i8* null, i8** [[TMP19]], align 8 +// CHECK18-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK18-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i64* +// CHECK18-NEXT: store i64 2, i64* [[TMP21]], align 8 +// CHECK18-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK18-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i64* // CHECK18-NEXT: store i64 2, i64* [[TMP23]], align 8 -// CHECK18-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK18-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i64* -// CHECK18-NEXT: store i64 2, i64* [[TMP25]], align 8 -// CHECK18-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK18-NEXT: store i64 8, i64* [[TMP26]], align 8 -// CHECK18-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK18-NEXT: store i8* null, i8** [[TMP27]], align 8 -// CHECK18-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK18-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i64* -// CHECK18-NEXT: store i64 [[TMP2]], i64* [[TMP29]], align 8 -// CHECK18-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK18-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i64* -// CHECK18-NEXT: store i64 [[TMP2]], i64* [[TMP31]], align 8 -// CHECK18-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK18-NEXT: store i64 8, i64* [[TMP32]], align 8 -// CHECK18-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 -// CHECK18-NEXT: store i8* null, i8** [[TMP33]], align 8 -// CHECK18-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK18-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i16** -// CHECK18-NEXT: store i16* [[VLA]], i16** [[TMP35]], align 8 -// CHECK18-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK18-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i16** -// CHECK18-NEXT: store i16* [[VLA]], i16** [[TMP37]], align 8 -// CHECK18-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK18-NEXT: store i64 [[TMP9]], i64* [[TMP38]], align 8 -// CHECK18-NEXT: [[TMP39:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 -// CHECK18-NEXT: store i8* null, i8** [[TMP39]], align 8 -// CHECK18-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l227.region_id, i32 5, i8** [[TMP40]], i8** [[TMP41]], i64* [[TMP42]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.23, i32 0, 
i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK18-NEXT: [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0 -// CHECK18-NEXT: br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK18-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK18-NEXT: store i8* null, i8** [[TMP24]], align 8 +// CHECK18-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK18-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i64* +// CHECK18-NEXT: store i64 [[TMP2]], i64* [[TMP26]], align 8 +// CHECK18-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK18-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i64* +// CHECK18-NEXT: store i64 [[TMP2]], i64* [[TMP28]], align 8 +// CHECK18-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 +// CHECK18-NEXT: store i8* null, i8** [[TMP29]], align 8 +// CHECK18-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK18-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i16** +// CHECK18-NEXT: store i16* [[VLA]], i16** [[TMP31]], align 8 +// CHECK18-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK18-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i16** +// CHECK18-NEXT: store i16* [[VLA]], i16** [[TMP33]], align 8 +// CHECK18-NEXT: store i64 [[TMP9]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.24, i32 0, i32 4), align 8 +// CHECK18-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 +// CHECK18-NEXT: store i8* null, i8** [[TMP34]], align 8 +// CHECK18-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP37:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l227.region_id, i32 5, i8** [[TMP35]], i8** [[TMP36]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.24, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.25, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK18-NEXT: [[TMP38:%.*]] = icmp ne i32 [[TMP37]], 0 +// CHECK18-NEXT: br i1 [[TMP38]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK18: omp_offload.failed: // CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l227(%struct.S1* [[THIS1]], i64 [[TMP6]], i64 2, i64 [[TMP2]], i16* [[VLA]]) #[[ATTR3]] // CHECK18-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -10256,15 +10082,15 @@ // CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l227(%struct.S1* [[THIS1]], i64 [[TMP6]], i64 2, i64 [[TMP2]], i16* [[VLA]]) #[[ATTR3]] // CHECK18-NEXT: br label [[OMP_IF_END]] // CHECK18: omp_if.end: -// CHECK18-NEXT: [[TMP45:%.*]] = mul nsw i64 1, [[TMP2]] -// CHECK18-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP45]] +// CHECK18-NEXT: [[TMP39:%.*]] = mul nsw i64 1, [[TMP2]] +// CHECK18-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP39]] // CHECK18-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1 -// 
CHECK18-NEXT: [[TMP46:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2 -// CHECK18-NEXT: [[CONV3:%.*]] = sext i16 [[TMP46]] to i32 -// CHECK18-NEXT: [[TMP47:%.*]] = load i32, i32* [[B]], align 4 -// CHECK18-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], [[TMP47]] -// CHECK18-NEXT: [[TMP48:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK18-NEXT: call void @llvm.stackrestore(i8* [[TMP48]]) +// CHECK18-NEXT: [[TMP40:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2 +// CHECK18-NEXT: [[CONV3:%.*]] = sext i16 [[TMP40]] to i32 +// CHECK18-NEXT: [[TMP41:%.*]] = load i32, i32* [[B]], align 4 +// CHECK18-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], [[TMP41]] +// CHECK18-NEXT: [[TMP42:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK18-NEXT: call void @llvm.stackrestore(i8* [[TMP42]]) // CHECK18-NEXT: ret i32 [[ADD4]] // // @@ -10336,7 +10162,7 @@ // CHECK18-NEXT: store i8* null, i8** [[TMP26]], align 8 // CHECK18-NEXT: [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK18-NEXT: [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l209.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.25, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.26, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK18-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l209.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.27, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.28, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK18-NEXT: [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0 // CHECK18-NEXT: br i1 [[TMP30]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK18: omp_offload.failed: @@ -10405,7 +10231,7 @@ // CHECK18-NEXT: store i8* null, i8** [[TMP19]], align 8 // CHECK18-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK18-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l192.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.28, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.29, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK18-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l192.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.30, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.31, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK18-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0 // CHECK18-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK18: 
omp_offload.failed: @@ -10444,11 +10270,11 @@ // CHECK18-NEXT: [[CONV3:%.*]] = bitcast i64* [[B_CASTED]] to i32* // CHECK18-NEXT: store i32 [[TMP4]], i32* [[CONV3]], align 4 // CHECK18-NEXT: [[TMP5:%.*]] = load i64, i64* [[B_CASTED]], align 8 -// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*)* @.omp_outlined..22 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], i16* [[TMP3]]) +// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*)* @.omp_outlined..23 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], i16* [[TMP3]]) // CHECK18-NEXT: ret void // // -// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..22 +// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..23 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i64 noundef [[B:%.*]], i64 noundef [[VLA:%.*]], i64 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR2]] { // CHECK18-NEXT: entry: // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -10517,11 +10343,11 @@ // CHECK18-NEXT: [[CONV5:%.*]] = bitcast i64* [[AAA_CASTED]] to i8* // CHECK18-NEXT: store i8 [[TMP5]], i8* [[CONV5]], align 1 // CHECK18-NEXT: [[TMP6:%.*]] = load i64, i64* [[AAA_CASTED]], align 8 -// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, [10 x i32]*)* @.omp_outlined..24 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], [10 x i32]* [[TMP0]]) +// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, [10 x i32]*)* @.omp_outlined..26 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], [10 x i32]* [[TMP0]]) // CHECK18-NEXT: ret void // // -// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..24 +// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..26 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], i64 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] { // CHECK18-NEXT: entry: // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -10582,11 +10408,11 @@ // CHECK18-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16* // CHECK18-NEXT: store i16 [[TMP3]], i16* [[CONV3]], align 2 // CHECK18-NEXT: [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8 -// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..27 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]]) +// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..29 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]]) // CHECK18-NEXT: ret void // // -// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..27 +// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..29 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] { // CHECK18-NEXT: entry: // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -10659,7 +10485,6 @@ // CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS20:%.*]] = alloca [9 x i8*], align 4 // CHECK19-NEXT: [[DOTOFFLOAD_PTRS21:%.*]] = alloca [9 x i8*], align 4 // CHECK19-NEXT: [[DOTOFFLOAD_MAPPERS22:%.*]] = alloca [9 x i8*], align 4 -// CHECK19-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [9 x i64], align 4 // CHECK19-NEXT: [[NN:%.*]] = alloca i32, align 4 // CHECK19-NEXT: [[NN_CASTED:%.*]] = alloca i32, align 4 // CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS27:%.*]] = alloca [1 x i8*], align 4 @@ -10839,96 +10664,79 @@ // CHECK19-NEXT: [[TMP97:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0 // CHECK19-NEXT: [[TMP98:%.*]] = bitcast i8** [[TMP97]] to i32* // CHECK19-NEXT: store i32 [[TMP88]], i32* [[TMP98]], align 4 -// CHECK19-NEXT: [[TMP99:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK19-NEXT: store i64 4, i64* [[TMP99]], align 4 -// CHECK19-NEXT: [[TMP100:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i32 0, i32 0 -// CHECK19-NEXT: store i8* null, i8** [[TMP100]], align 4 -// CHECK19-NEXT: [[TMP101:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 1 -// CHECK19-NEXT: [[TMP102:%.*]] = bitcast i8** [[TMP101]] to [10 x float]** -// CHECK19-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP102]], align 4 -// CHECK19-NEXT: [[TMP103:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 1 -// CHECK19-NEXT: [[TMP104:%.*]] = bitcast i8** [[TMP103]] to [10 x float]** -// CHECK19-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP104]], align 4 -// CHECK19-NEXT: [[TMP105:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK19-NEXT: store i64 40, i64* [[TMP105]], align 4 -// CHECK19-NEXT: [[TMP106:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i32 0, i32 1 -// CHECK19-NEXT: store i8* null, i8** [[TMP106]], align 4 -// CHECK19-NEXT: [[TMP107:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 2 +// CHECK19-NEXT: [[TMP99:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i32 0, i32 0 +// CHECK19-NEXT: store i8* null, i8** [[TMP99]], align 4 +// CHECK19-NEXT: [[TMP100:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 1 +// CHECK19-NEXT: [[TMP101:%.*]] = bitcast i8** [[TMP100]] to [10 x float]** +// CHECK19-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP101]], align 4 +// CHECK19-NEXT: [[TMP102:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 1 +// CHECK19-NEXT: [[TMP103:%.*]] = bitcast i8** [[TMP102]] to [10 x float]** +// CHECK19-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP103]], align 4 +// 
CHECK19-NEXT: [[TMP104:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i32 0, i32 1 +// CHECK19-NEXT: store i8* null, i8** [[TMP104]], align 4 +// CHECK19-NEXT: [[TMP105:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 2 +// CHECK19-NEXT: [[TMP106:%.*]] = bitcast i8** [[TMP105]] to i32* +// CHECK19-NEXT: store i32 [[TMP1]], i32* [[TMP106]], align 4 +// CHECK19-NEXT: [[TMP107:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 2 // CHECK19-NEXT: [[TMP108:%.*]] = bitcast i8** [[TMP107]] to i32* // CHECK19-NEXT: store i32 [[TMP1]], i32* [[TMP108]], align 4 -// CHECK19-NEXT: [[TMP109:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 2 -// CHECK19-NEXT: [[TMP110:%.*]] = bitcast i8** [[TMP109]] to i32* -// CHECK19-NEXT: store i32 [[TMP1]], i32* [[TMP110]], align 4 -// CHECK19-NEXT: [[TMP111:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK19-NEXT: store i64 4, i64* [[TMP111]], align 4 -// CHECK19-NEXT: [[TMP112:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i32 0, i32 2 -// CHECK19-NEXT: store i8* null, i8** [[TMP112]], align 4 -// CHECK19-NEXT: [[TMP113:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 3 -// CHECK19-NEXT: [[TMP114:%.*]] = bitcast i8** [[TMP113]] to float** -// CHECK19-NEXT: store float* [[VLA]], float** [[TMP114]], align 4 -// CHECK19-NEXT: [[TMP115:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 3 -// CHECK19-NEXT: [[TMP116:%.*]] = bitcast i8** [[TMP115]] to float** -// CHECK19-NEXT: store float* [[VLA]], float** [[TMP116]], align 4 -// CHECK19-NEXT: [[TMP117:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK19-NEXT: store i64 [[TMP91]], i64* [[TMP117]], align 4 -// CHECK19-NEXT: [[TMP118:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i32 0, i32 3 -// CHECK19-NEXT: store i8* null, i8** [[TMP118]], align 4 -// CHECK19-NEXT: [[TMP119:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 4 -// CHECK19-NEXT: [[TMP120:%.*]] = bitcast i8** [[TMP119]] to [5 x [10 x double]]** -// CHECK19-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP120]], align 4 -// CHECK19-NEXT: [[TMP121:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 4 -// CHECK19-NEXT: [[TMP122:%.*]] = bitcast i8** [[TMP121]] to [5 x [10 x double]]** -// CHECK19-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP122]], align 4 -// CHECK19-NEXT: [[TMP123:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK19-NEXT: store i64 400, i64* [[TMP123]], align 4 -// CHECK19-NEXT: [[TMP124:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i32 0, i32 4 +// CHECK19-NEXT: [[TMP109:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i32 0, i32 2 +// CHECK19-NEXT: store i8* null, i8** [[TMP109]], align 4 +// CHECK19-NEXT: [[TMP110:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 3 +// CHECK19-NEXT: [[TMP111:%.*]] = bitcast i8** [[TMP110]] to float** +// CHECK19-NEXT: store float* [[VLA]], float** [[TMP111]], align 4 +// CHECK19-NEXT: [[TMP112:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS21]], 
i32 0, i32 3 +// CHECK19-NEXT: [[TMP113:%.*]] = bitcast i8** [[TMP112]] to float** +// CHECK19-NEXT: store float* [[VLA]], float** [[TMP113]], align 4 +// CHECK19-NEXT: store i64 [[TMP91]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_sizes.10, i32 0, i32 3), align 4 +// CHECK19-NEXT: [[TMP114:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i32 0, i32 3 +// CHECK19-NEXT: store i8* null, i8** [[TMP114]], align 4 +// CHECK19-NEXT: [[TMP115:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 4 +// CHECK19-NEXT: [[TMP116:%.*]] = bitcast i8** [[TMP115]] to [5 x [10 x double]]** +// CHECK19-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP116]], align 4 +// CHECK19-NEXT: [[TMP117:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 4 +// CHECK19-NEXT: [[TMP118:%.*]] = bitcast i8** [[TMP117]] to [5 x [10 x double]]** +// CHECK19-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP118]], align 4 +// CHECK19-NEXT: [[TMP119:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i32 0, i32 4 +// CHECK19-NEXT: store i8* null, i8** [[TMP119]], align 4 +// CHECK19-NEXT: [[TMP120:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 5 +// CHECK19-NEXT: [[TMP121:%.*]] = bitcast i8** [[TMP120]] to i32* +// CHECK19-NEXT: store i32 5, i32* [[TMP121]], align 4 +// CHECK19-NEXT: [[TMP122:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 5 +// CHECK19-NEXT: [[TMP123:%.*]] = bitcast i8** [[TMP122]] to i32* +// CHECK19-NEXT: store i32 5, i32* [[TMP123]], align 4 +// CHECK19-NEXT: [[TMP124:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i32 0, i32 5 // CHECK19-NEXT: store i8* null, i8** [[TMP124]], align 4 -// CHECK19-NEXT: [[TMP125:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 5 +// CHECK19-NEXT: [[TMP125:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 6 // CHECK19-NEXT: [[TMP126:%.*]] = bitcast i8** [[TMP125]] to i32* -// CHECK19-NEXT: store i32 5, i32* [[TMP126]], align 4 -// CHECK19-NEXT: [[TMP127:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 5 +// CHECK19-NEXT: store i32 [[TMP3]], i32* [[TMP126]], align 4 +// CHECK19-NEXT: [[TMP127:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 6 // CHECK19-NEXT: [[TMP128:%.*]] = bitcast i8** [[TMP127]] to i32* -// CHECK19-NEXT: store i32 5, i32* [[TMP128]], align 4 -// CHECK19-NEXT: [[TMP129:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5 -// CHECK19-NEXT: store i64 4, i64* [[TMP129]], align 4 -// CHECK19-NEXT: [[TMP130:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i32 0, i32 5 -// CHECK19-NEXT: store i8* null, i8** [[TMP130]], align 4 -// CHECK19-NEXT: [[TMP131:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 6 -// CHECK19-NEXT: [[TMP132:%.*]] = bitcast i8** [[TMP131]] to i32* -// CHECK19-NEXT: store i32 [[TMP3]], i32* [[TMP132]], align 4 -// CHECK19-NEXT: [[TMP133:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 6 -// CHECK19-NEXT: [[TMP134:%.*]] = bitcast i8** [[TMP133]] to i32* -// CHECK19-NEXT: store i32 [[TMP3]], i32* [[TMP134]], align 4 -// CHECK19-NEXT: [[TMP135:%.*]] = 
getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6 -// CHECK19-NEXT: store i64 4, i64* [[TMP135]], align 4 -// CHECK19-NEXT: [[TMP136:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i32 0, i32 6 -// CHECK19-NEXT: store i8* null, i8** [[TMP136]], align 4 -// CHECK19-NEXT: [[TMP137:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 7 -// CHECK19-NEXT: [[TMP138:%.*]] = bitcast i8** [[TMP137]] to double** -// CHECK19-NEXT: store double* [[VLA1]], double** [[TMP138]], align 4 -// CHECK19-NEXT: [[TMP139:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 7 -// CHECK19-NEXT: [[TMP140:%.*]] = bitcast i8** [[TMP139]] to double** -// CHECK19-NEXT: store double* [[VLA1]], double** [[TMP140]], align 4 -// CHECK19-NEXT: [[TMP141:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7 -// CHECK19-NEXT: store i64 [[TMP94]], i64* [[TMP141]], align 4 -// CHECK19-NEXT: [[TMP142:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i32 0, i32 7 -// CHECK19-NEXT: store i8* null, i8** [[TMP142]], align 4 -// CHECK19-NEXT: [[TMP143:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 8 -// CHECK19-NEXT: [[TMP144:%.*]] = bitcast i8** [[TMP143]] to %struct.TT** -// CHECK19-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP144]], align 4 -// CHECK19-NEXT: [[TMP145:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 8 -// CHECK19-NEXT: [[TMP146:%.*]] = bitcast i8** [[TMP145]] to %struct.TT** -// CHECK19-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP146]], align 4 -// CHECK19-NEXT: [[TMP147:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 8 -// CHECK19-NEXT: store i64 12, i64* [[TMP147]], align 4 -// CHECK19-NEXT: [[TMP148:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i32 0, i32 8 -// CHECK19-NEXT: store i8* null, i8** [[TMP148]], align 4 -// CHECK19-NEXT: [[TMP149:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP150:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP151:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP152:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142.region_id, i32 9, i8** [[TMP149]], i8** [[TMP150]], i64* [[TMP151]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK19-NEXT: [[TMP153:%.*]] = icmp ne i32 [[TMP152]], 0 -// CHECK19-NEXT: br i1 [[TMP153]], label [[OMP_OFFLOAD_FAILED23:%.*]], label [[OMP_OFFLOAD_CONT24:%.*]] +// CHECK19-NEXT: store i32 [[TMP3]], i32* [[TMP128]], align 4 +// CHECK19-NEXT: [[TMP129:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i32 0, i32 6 +// CHECK19-NEXT: store i8* null, i8** [[TMP129]], align 4 +// CHECK19-NEXT: [[TMP130:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 7 +// CHECK19-NEXT: [[TMP131:%.*]] = bitcast i8** [[TMP130]] to double** +// CHECK19-NEXT: store double* [[VLA1]], double** [[TMP131]], align 4 +// CHECK19-NEXT: [[TMP132:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* 
[[DOTOFFLOAD_PTRS21]], i32 0, i32 7 +// CHECK19-NEXT: [[TMP133:%.*]] = bitcast i8** [[TMP132]] to double** +// CHECK19-NEXT: store double* [[VLA1]], double** [[TMP133]], align 4 +// CHECK19-NEXT: store i64 [[TMP94]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_sizes.10, i32 0, i32 7), align 4 +// CHECK19-NEXT: [[TMP134:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i32 0, i32 7 +// CHECK19-NEXT: store i8* null, i8** [[TMP134]], align 4 +// CHECK19-NEXT: [[TMP135:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 8 +// CHECK19-NEXT: [[TMP136:%.*]] = bitcast i8** [[TMP135]] to %struct.TT** +// CHECK19-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP136]], align 4 +// CHECK19-NEXT: [[TMP137:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 8 +// CHECK19-NEXT: [[TMP138:%.*]] = bitcast i8** [[TMP137]] to %struct.TT** +// CHECK19-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP138]], align 4 +// CHECK19-NEXT: [[TMP139:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i32 0, i32 8 +// CHECK19-NEXT: store i8* null, i8** [[TMP139]], align 4 +// CHECK19-NEXT: [[TMP140:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP141:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP142:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142.region_id, i32 9, i8** [[TMP140]], i8** [[TMP141]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_sizes.10, i32 0, i32 0), i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_maptypes.11, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK19-NEXT: [[TMP143:%.*]] = icmp ne i32 [[TMP142]], 0 +// CHECK19-NEXT: br i1 [[TMP143]], label [[OMP_OFFLOAD_FAILED23:%.*]], label [[OMP_OFFLOAD_CONT24:%.*]] // CHECK19: omp_offload.failed23: // CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142(i32 [[TMP88]], [10 x float]* [[B]], i32 [[TMP1]], float* [[VLA]], [5 x [10 x double]]* [[C]], i32 5, i32 [[TMP3]], double* [[VLA1]], %struct.TT* [[D]]) #[[ATTR3]] // CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT24]] @@ -10939,50 +10747,50 @@ // CHECK19-NEXT: br label [[OMP_IF_END26]] // CHECK19: omp_if.end26: // CHECK19-NEXT: store i32 0, i32* [[NN]], align 4 -// CHECK19-NEXT: [[TMP154:%.*]] = load i32, i32* [[NN]], align 4 -// CHECK19-NEXT: store i32 [[TMP154]], i32* [[NN_CASTED]], align 4 -// CHECK19-NEXT: [[TMP155:%.*]] = load i32, i32* [[NN_CASTED]], align 4 -// CHECK19-NEXT: [[TMP156:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS27]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP157:%.*]] = bitcast i8** [[TMP156]] to i32* -// CHECK19-NEXT: store i32 [[TMP155]], i32* [[TMP157]], align 4 -// CHECK19-NEXT: [[TMP158:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS28]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP159:%.*]] = bitcast i8** [[TMP158]] to i32* -// CHECK19-NEXT: store i32 [[TMP155]], i32* [[TMP159]], align 4 -// CHECK19-NEXT: [[TMP160:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS29]], i32 0, i32 0 -// CHECK19-NEXT: store i8* null, i8** [[TMP160]], align 4 -// CHECK19-NEXT: [[TMP161:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS27]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP162:%.*]] = 
getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS28]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP163:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l154.region_id, i32 1, i8** [[TMP161]], i8** [[TMP162]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.13, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.14, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK19-NEXT: [[TMP164:%.*]] = icmp ne i32 [[TMP163]], 0 -// CHECK19-NEXT: br i1 [[TMP164]], label [[OMP_OFFLOAD_FAILED30:%.*]], label [[OMP_OFFLOAD_CONT31:%.*]] +// CHECK19-NEXT: [[TMP144:%.*]] = load i32, i32* [[NN]], align 4 +// CHECK19-NEXT: store i32 [[TMP144]], i32* [[NN_CASTED]], align 4 +// CHECK19-NEXT: [[TMP145:%.*]] = load i32, i32* [[NN_CASTED]], align 4 +// CHECK19-NEXT: [[TMP146:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS27]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP147:%.*]] = bitcast i8** [[TMP146]] to i32* +// CHECK19-NEXT: store i32 [[TMP145]], i32* [[TMP147]], align 4 +// CHECK19-NEXT: [[TMP148:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS28]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP149:%.*]] = bitcast i8** [[TMP148]] to i32* +// CHECK19-NEXT: store i32 [[TMP145]], i32* [[TMP149]], align 4 +// CHECK19-NEXT: [[TMP150:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS29]], i32 0, i32 0 +// CHECK19-NEXT: store i8* null, i8** [[TMP150]], align 4 +// CHECK19-NEXT: [[TMP151:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS27]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP152:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS28]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP153:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l154.region_id, i32 1, i8** [[TMP151]], i8** [[TMP152]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.14, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.15, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK19-NEXT: [[TMP154:%.*]] = icmp ne i32 [[TMP153]], 0 +// CHECK19-NEXT: br i1 [[TMP154]], label [[OMP_OFFLOAD_FAILED30:%.*]], label [[OMP_OFFLOAD_CONT31:%.*]] // CHECK19: omp_offload.failed30: -// CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l154(i32 [[TMP155]]) #[[ATTR3]] +// CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l154(i32 [[TMP145]]) #[[ATTR3]] // CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT31]] // CHECK19: omp_offload.cont31: -// CHECK19-NEXT: [[TMP165:%.*]] = load i32, i32* [[NN]], align 4 -// CHECK19-NEXT: store i32 [[TMP165]], i32* [[NN_CASTED32]], align 4 -// CHECK19-NEXT: [[TMP166:%.*]] = load i32, i32* [[NN_CASTED32]], align 4 -// CHECK19-NEXT: [[TMP167:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP168:%.*]] = bitcast i8** [[TMP167]] to i32* -// CHECK19-NEXT: store i32 [[TMP166]], i32* [[TMP168]], align 4 -// CHECK19-NEXT: [[TMP169:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP170:%.*]] = bitcast i8** [[TMP169]] to i32* -// CHECK19-NEXT: store i32 [[TMP166]], i32* [[TMP170]], align 4 -// CHECK19-NEXT: [[TMP171:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS35]], i32 0, 
i32 0 -// CHECK19-NEXT: store i8* null, i8** [[TMP171]], align 4 -// CHECK19-NEXT: [[TMP172:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP173:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP174:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l157.region_id, i32 1, i8** [[TMP172]], i8** [[TMP173]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.17, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.18, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK19-NEXT: [[TMP175:%.*]] = icmp ne i32 [[TMP174]], 0 -// CHECK19-NEXT: br i1 [[TMP175]], label [[OMP_OFFLOAD_FAILED36:%.*]], label [[OMP_OFFLOAD_CONT37:%.*]] +// CHECK19-NEXT: [[TMP155:%.*]] = load i32, i32* [[NN]], align 4 +// CHECK19-NEXT: store i32 [[TMP155]], i32* [[NN_CASTED32]], align 4 +// CHECK19-NEXT: [[TMP156:%.*]] = load i32, i32* [[NN_CASTED32]], align 4 +// CHECK19-NEXT: [[TMP157:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP158:%.*]] = bitcast i8** [[TMP157]] to i32* +// CHECK19-NEXT: store i32 [[TMP156]], i32* [[TMP158]], align 4 +// CHECK19-NEXT: [[TMP159:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP160:%.*]] = bitcast i8** [[TMP159]] to i32* +// CHECK19-NEXT: store i32 [[TMP156]], i32* [[TMP160]], align 4 +// CHECK19-NEXT: [[TMP161:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS35]], i32 0, i32 0 +// CHECK19-NEXT: store i8* null, i8** [[TMP161]], align 4 +// CHECK19-NEXT: [[TMP162:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP163:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP164:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l157.region_id, i32 1, i8** [[TMP162]], i8** [[TMP163]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.18, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.19, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK19-NEXT: [[TMP165:%.*]] = icmp ne i32 [[TMP164]], 0 +// CHECK19-NEXT: br i1 [[TMP165]], label [[OMP_OFFLOAD_FAILED36:%.*]], label [[OMP_OFFLOAD_CONT37:%.*]] // CHECK19: omp_offload.failed36: -// CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l157(i32 [[TMP166]]) #[[ATTR3]] +// CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l157(i32 [[TMP156]]) #[[ATTR3]] // CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT37]] // CHECK19: omp_offload.cont37: -// CHECK19-NEXT: [[TMP176:%.*]] = load i32, i32* [[A]], align 4 -// CHECK19-NEXT: [[TMP177:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK19-NEXT: call void @llvm.stackrestore(i8* [[TMP177]]) -// CHECK19-NEXT: ret i32 [[TMP176]] +// CHECK19-NEXT: [[TMP166:%.*]] = load i32, i32* [[A]], align 4 +// CHECK19-NEXT: [[TMP167:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK19-NEXT: call void @llvm.stackrestore(i8* [[TMP167]]) +// CHECK19-NEXT: ret i32 [[TMP166]] // // // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l101 @@ -11349,11 +11157,11 
@@ // CHECK19-NEXT: [[TMP0:%.*]] = load i32, i32* [[NN_ADDR]], align 4 // CHECK19-NEXT: store i32 [[TMP0]], i32* [[NN_CASTED]], align 4 // CHECK19-NEXT: [[TMP1:%.*]] = load i32, i32* [[NN_CASTED]], align 4 -// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP1]]) +// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32)* @.omp_outlined..12 to void (i32*, i32*, ...)*), i32 [[TMP1]]) // CHECK19-NEXT: ret void // // -// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..12 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[NN:%.*]]) #[[ATTR2]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -11366,11 +11174,11 @@ // CHECK19-NEXT: [[TMP0:%.*]] = load i32, i32* [[NN_ADDR]], align 4 // CHECK19-NEXT: store i32 [[TMP0]], i32* [[NN_CASTED]], align 4 // CHECK19-NEXT: [[TMP1:%.*]] = load i32, i32* [[NN_CASTED]], align 4 -// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32)* @.omp_outlined..12 to void (i32*, i32*, ...)*), i32 [[TMP1]]) +// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32)* @.omp_outlined..13 to void (i32*, i32*, ...)*), i32 [[TMP1]]) // CHECK19-NEXT: ret void // // -// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..12 +// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..13 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[NN:%.*]]) #[[ATTR2]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -11391,11 +11199,11 @@ // CHECK19-NEXT: [[TMP0:%.*]] = load i32, i32* [[NN_ADDR]], align 4 // CHECK19-NEXT: store i32 [[TMP0]], i32* [[NN_CASTED]], align 4 // CHECK19-NEXT: [[TMP1:%.*]] = load i32, i32* [[NN_CASTED]], align 4 -// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i32 [[TMP1]]) +// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i32 [[TMP1]]) // CHECK19-NEXT: ret void // // -// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..15 +// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..16 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[NN:%.*]]) #[[ATTR2]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -11404,11 +11212,11 @@ // CHECK19-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 // CHECK19-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 // CHECK19-NEXT: store i32 [[NN]], i32* [[NN_ADDR]], align 4 -// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i32* [[NN_ADDR]]) +// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined..17 to void (i32*, i32*, ...)*), i32* [[NN_ADDR]]) // CHECK19-NEXT: ret void // // -// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..16 +// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..17 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[NN:%.*]]) #[[ATTR2]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -11442,7 +11250,7 @@ // CHECK19-NEXT: store i8* null, i8** [[TMP5]], align 4 // CHECK19-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK19-NEXT: [[TMP7:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP8:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z6bazzzziPi_l182.region_id, i32 1, i8** [[TMP6]], i8** [[TMP7]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.20, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.21, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK19-NEXT: [[TMP8:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z6bazzzziPi_l182.region_id, i32 1, i8** [[TMP6]], i8** [[TMP7]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.21, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.22, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK19-NEXT: [[TMP9:%.*]] = icmp ne i32 [[TMP8]], 0 // CHECK19-NEXT: br i1 [[TMP9]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK19: omp_offload.failed: @@ -11458,11 +11266,11 @@ // CHECK19-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4 // CHECK19-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4 // CHECK19-NEXT: [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4 -// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32)* @.omp_outlined..19 to void (i32*, i32*, ...)*), i32 [[TMP0]]) +// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32)* @.omp_outlined..20 to void (i32*, i32*, ...)*), i32 [[TMP0]]) // CHECK19-NEXT: ret void // // -// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..19 +// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..20 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[VLA:%.*]]) #[[ATTR2]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -11520,7 +11328,6 @@ // CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 4 // CHECK19-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 4 // CHECK19-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 4 -// CHECK19-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 4 // CHECK19-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 // CHECK19-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 // CHECK19-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 @@ -11550,56 +11357,46 @@ // CHECK19-NEXT: [[TMP12:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK19-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to double** // CHECK19-NEXT: store double* [[A]], double** [[TMP13]], align 4 -// CHECK19-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK19-NEXT: store i64 8, i64* [[TMP14]], align 4 -// CHECK19-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK19-NEXT: store i8* null, i8** [[TMP15]], align 4 -// CHECK19-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK19-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32* -// CHECK19-NEXT: store i32 [[TMP5]], i32* [[TMP17]], align 4 -// CHECK19-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK19-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32* -// CHECK19-NEXT: store i32 [[TMP5]], i32* [[TMP19]], align 4 -// CHECK19-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK19-NEXT: store i64 4, i64* [[TMP20]], align 4 -// CHECK19-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK19-NEXT: store i8* null, i8** [[TMP21]], align 4 -// CHECK19-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK19-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK19-NEXT: store i8* null, i8** [[TMP14]], align 4 +// CHECK19-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK19-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i32* +// CHECK19-NEXT: store i32 [[TMP5]], i32* [[TMP16]], align 4 +// CHECK19-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK19-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32* +// CHECK19-NEXT: store i32 [[TMP5]], i32* [[TMP18]], 
align 4 +// CHECK19-NEXT: [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK19-NEXT: store i8* null, i8** [[TMP19]], align 4 +// CHECK19-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK19-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32* +// CHECK19-NEXT: store i32 2, i32* [[TMP21]], align 4 +// CHECK19-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK19-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i32* // CHECK19-NEXT: store i32 2, i32* [[TMP23]], align 4 -// CHECK19-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK19-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i32* -// CHECK19-NEXT: store i32 2, i32* [[TMP25]], align 4 -// CHECK19-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK19-NEXT: store i64 4, i64* [[TMP26]], align 4 -// CHECK19-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK19-NEXT: store i8* null, i8** [[TMP27]], align 4 -// CHECK19-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK19-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i32* -// CHECK19-NEXT: store i32 [[TMP1]], i32* [[TMP29]], align 4 -// CHECK19-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK19-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i32* -// CHECK19-NEXT: store i32 [[TMP1]], i32* [[TMP31]], align 4 -// CHECK19-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK19-NEXT: store i64 4, i64* [[TMP32]], align 4 -// CHECK19-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 -// CHECK19-NEXT: store i8* null, i8** [[TMP33]], align 4 -// CHECK19-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK19-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i16** -// CHECK19-NEXT: store i16* [[VLA]], i16** [[TMP35]], align 4 -// CHECK19-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK19-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i16** -// CHECK19-NEXT: store i16* [[VLA]], i16** [[TMP37]], align 4 -// CHECK19-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK19-NEXT: store i64 [[TMP9]], i64* [[TMP38]], align 4 -// CHECK19-NEXT: [[TMP39:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 -// CHECK19-NEXT: store i8* null, i8** [[TMP39]], align 4 -// CHECK19-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l227.region_id, i32 5, i8** [[TMP40]], i8** [[TMP41]], i64* [[TMP42]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.23, i32 0, 
i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK19-NEXT: [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0 -// CHECK19-NEXT: br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK19-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK19-NEXT: store i8* null, i8** [[TMP24]], align 4 +// CHECK19-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK19-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i32* +// CHECK19-NEXT: store i32 [[TMP1]], i32* [[TMP26]], align 4 +// CHECK19-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK19-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i32* +// CHECK19-NEXT: store i32 [[TMP1]], i32* [[TMP28]], align 4 +// CHECK19-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 +// CHECK19-NEXT: store i8* null, i8** [[TMP29]], align 4 +// CHECK19-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK19-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i16** +// CHECK19-NEXT: store i16* [[VLA]], i16** [[TMP31]], align 4 +// CHECK19-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK19-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i16** +// CHECK19-NEXT: store i16* [[VLA]], i16** [[TMP33]], align 4 +// CHECK19-NEXT: store i64 [[TMP9]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.24, i32 0, i32 4), align 4 +// CHECK19-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 +// CHECK19-NEXT: store i8* null, i8** [[TMP34]], align 4 +// CHECK19-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP37:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l227.region_id, i32 5, i8** [[TMP35]], i8** [[TMP36]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.24, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.25, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK19-NEXT: [[TMP38:%.*]] = icmp ne i32 [[TMP37]], 0 +// CHECK19-NEXT: br i1 [[TMP38]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK19: omp_offload.failed: // CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l227(%struct.S1* [[THIS1]], i32 [[TMP5]], i32 2, i32 [[TMP1]], i16* [[VLA]]) #[[ATTR3]] // CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -11609,15 +11406,15 @@ // CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l227(%struct.S1* [[THIS1]], i32 [[TMP5]], i32 2, i32 [[TMP1]], i16* [[VLA]]) #[[ATTR3]] // CHECK19-NEXT: br label [[OMP_IF_END]] // CHECK19: omp_if.end: -// CHECK19-NEXT: [[TMP45:%.*]] = mul nsw i32 1, [[TMP1]] -// CHECK19-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP45]] +// CHECK19-NEXT: [[TMP39:%.*]] = mul nsw i32 1, [[TMP1]] +// CHECK19-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP39]] // CHECK19-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1 -// 
CHECK19-NEXT: [[TMP46:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2 -// CHECK19-NEXT: [[CONV:%.*]] = sext i16 [[TMP46]] to i32 -// CHECK19-NEXT: [[TMP47:%.*]] = load i32, i32* [[B]], align 4 -// CHECK19-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[TMP47]] -// CHECK19-NEXT: [[TMP48:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK19-NEXT: call void @llvm.stackrestore(i8* [[TMP48]]) +// CHECK19-NEXT: [[TMP40:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2 +// CHECK19-NEXT: [[CONV:%.*]] = sext i16 [[TMP40]] to i32 +// CHECK19-NEXT: [[TMP41:%.*]] = load i32, i32* [[B]], align 4 +// CHECK19-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[TMP41]] +// CHECK19-NEXT: [[TMP42:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK19-NEXT: call void @llvm.stackrestore(i8* [[TMP42]]) // CHECK19-NEXT: ret i32 [[ADD3]] // // @@ -11688,7 +11485,7 @@ // CHECK19-NEXT: store i8* null, i8** [[TMP26]], align 4 // CHECK19-NEXT: [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK19-NEXT: [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l209.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.25, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.26, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK19-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l209.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.27, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.28, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK19-NEXT: [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0 // CHECK19-NEXT: br i1 [[TMP30]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK19: omp_offload.failed: @@ -11756,7 +11553,7 @@ // CHECK19-NEXT: store i8* null, i8** [[TMP19]], align 4 // CHECK19-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK19-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l192.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.28, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.29, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK19-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l192.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.30, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.31, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK19-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0 // CHECK19-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK19: 
omp_offload.failed: @@ -11793,11 +11590,11 @@ // CHECK19-NEXT: [[TMP4:%.*]] = load i32, i32* [[B_ADDR]], align 4 // CHECK19-NEXT: store i32 [[TMP4]], i32* [[B_CASTED]], align 4 // CHECK19-NEXT: [[TMP5:%.*]] = load i32, i32* [[B_CASTED]], align 4 -// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*)* @.omp_outlined..22 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], i16* [[TMP3]]) +// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*)* @.omp_outlined..23 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], i16* [[TMP3]]) // CHECK19-NEXT: ret void // // -// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..22 +// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..23 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i32 noundef [[B:%.*]], i32 noundef [[VLA:%.*]], i32 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR2]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -11863,11 +11660,11 @@ // CHECK19-NEXT: [[CONV3:%.*]] = bitcast i32* [[AAA_CASTED]] to i8* // CHECK19-NEXT: store i8 [[TMP5]], i8* [[CONV3]], align 1 // CHECK19-NEXT: [[TMP6:%.*]] = load i32, i32* [[AAA_CASTED]], align 4 -// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, [10 x i32]*)* @.omp_outlined..24 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], [10 x i32]* [[TMP0]]) +// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, [10 x i32]*)* @.omp_outlined..26 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], [10 x i32]* [[TMP0]]) // CHECK19-NEXT: ret void // // -// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..24 +// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..26 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], i32 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -11925,11 +11722,11 @@ // CHECK19-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16* // CHECK19-NEXT: store i16 [[TMP3]], i16* [[CONV1]], align 2 // CHECK19-NEXT: [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4 -// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..27 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]]) +// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..29 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]]) // CHECK19-NEXT: ret void // // -// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..27 +// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..29 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -12001,7 +11798,6 @@ // CHECK20-NEXT: [[DOTOFFLOAD_BASEPTRS20:%.*]] = alloca [9 x i8*], align 4 // CHECK20-NEXT: [[DOTOFFLOAD_PTRS21:%.*]] = alloca [9 x i8*], align 4 // CHECK20-NEXT: [[DOTOFFLOAD_MAPPERS22:%.*]] = alloca [9 x i8*], align 4 -// CHECK20-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [9 x i64], align 4 // CHECK20-NEXT: [[NN:%.*]] = alloca i32, align 4 // CHECK20-NEXT: [[NN_CASTED:%.*]] = alloca i32, align 4 // CHECK20-NEXT: [[DOTOFFLOAD_BASEPTRS27:%.*]] = alloca [1 x i8*], align 4 @@ -12181,96 +11977,79 @@ // CHECK20-NEXT: [[TMP97:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0 // CHECK20-NEXT: [[TMP98:%.*]] = bitcast i8** [[TMP97]] to i32* // CHECK20-NEXT: store i32 [[TMP88]], i32* [[TMP98]], align 4 -// CHECK20-NEXT: [[TMP99:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK20-NEXT: store i64 4, i64* [[TMP99]], align 4 -// CHECK20-NEXT: [[TMP100:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i32 0, i32 0 -// CHECK20-NEXT: store i8* null, i8** [[TMP100]], align 4 -// CHECK20-NEXT: [[TMP101:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 1 -// CHECK20-NEXT: [[TMP102:%.*]] = bitcast i8** [[TMP101]] to [10 x float]** -// CHECK20-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP102]], align 4 -// CHECK20-NEXT: [[TMP103:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 1 -// CHECK20-NEXT: [[TMP104:%.*]] = bitcast i8** [[TMP103]] to [10 x float]** -// CHECK20-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP104]], align 4 -// CHECK20-NEXT: [[TMP105:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK20-NEXT: store i64 40, i64* [[TMP105]], align 4 -// CHECK20-NEXT: [[TMP106:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i32 0, i32 1 -// CHECK20-NEXT: store i8* null, i8** [[TMP106]], align 4 -// CHECK20-NEXT: [[TMP107:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 2 +// CHECK20-NEXT: [[TMP99:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i32 0, i32 0 +// CHECK20-NEXT: store i8* null, i8** [[TMP99]], align 4 +// CHECK20-NEXT: [[TMP100:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 1 +// CHECK20-NEXT: [[TMP101:%.*]] = bitcast i8** [[TMP100]] to [10 x float]** +// CHECK20-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP101]], align 4 +// CHECK20-NEXT: [[TMP102:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 1 +// CHECK20-NEXT: [[TMP103:%.*]] = bitcast i8** [[TMP102]] to [10 x float]** +// CHECK20-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP103]], align 4 +// 
CHECK20-NEXT: [[TMP104:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i32 0, i32 1 +// CHECK20-NEXT: store i8* null, i8** [[TMP104]], align 4 +// CHECK20-NEXT: [[TMP105:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 2 +// CHECK20-NEXT: [[TMP106:%.*]] = bitcast i8** [[TMP105]] to i32* +// CHECK20-NEXT: store i32 [[TMP1]], i32* [[TMP106]], align 4 +// CHECK20-NEXT: [[TMP107:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 2 // CHECK20-NEXT: [[TMP108:%.*]] = bitcast i8** [[TMP107]] to i32* // CHECK20-NEXT: store i32 [[TMP1]], i32* [[TMP108]], align 4 -// CHECK20-NEXT: [[TMP109:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 2 -// CHECK20-NEXT: [[TMP110:%.*]] = bitcast i8** [[TMP109]] to i32* -// CHECK20-NEXT: store i32 [[TMP1]], i32* [[TMP110]], align 4 -// CHECK20-NEXT: [[TMP111:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK20-NEXT: store i64 4, i64* [[TMP111]], align 4 -// CHECK20-NEXT: [[TMP112:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i32 0, i32 2 -// CHECK20-NEXT: store i8* null, i8** [[TMP112]], align 4 -// CHECK20-NEXT: [[TMP113:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 3 -// CHECK20-NEXT: [[TMP114:%.*]] = bitcast i8** [[TMP113]] to float** -// CHECK20-NEXT: store float* [[VLA]], float** [[TMP114]], align 4 -// CHECK20-NEXT: [[TMP115:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 3 -// CHECK20-NEXT: [[TMP116:%.*]] = bitcast i8** [[TMP115]] to float** -// CHECK20-NEXT: store float* [[VLA]], float** [[TMP116]], align 4 -// CHECK20-NEXT: [[TMP117:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK20-NEXT: store i64 [[TMP91]], i64* [[TMP117]], align 4 -// CHECK20-NEXT: [[TMP118:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i32 0, i32 3 -// CHECK20-NEXT: store i8* null, i8** [[TMP118]], align 4 -// CHECK20-NEXT: [[TMP119:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 4 -// CHECK20-NEXT: [[TMP120:%.*]] = bitcast i8** [[TMP119]] to [5 x [10 x double]]** -// CHECK20-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP120]], align 4 -// CHECK20-NEXT: [[TMP121:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 4 -// CHECK20-NEXT: [[TMP122:%.*]] = bitcast i8** [[TMP121]] to [5 x [10 x double]]** -// CHECK20-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP122]], align 4 -// CHECK20-NEXT: [[TMP123:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK20-NEXT: store i64 400, i64* [[TMP123]], align 4 -// CHECK20-NEXT: [[TMP124:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i32 0, i32 4 +// CHECK20-NEXT: [[TMP109:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i32 0, i32 2 +// CHECK20-NEXT: store i8* null, i8** [[TMP109]], align 4 +// CHECK20-NEXT: [[TMP110:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 3 +// CHECK20-NEXT: [[TMP111:%.*]] = bitcast i8** [[TMP110]] to float** +// CHECK20-NEXT: store float* [[VLA]], float** [[TMP111]], align 4 +// CHECK20-NEXT: [[TMP112:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS21]], 
i32 0, i32 3 +// CHECK20-NEXT: [[TMP113:%.*]] = bitcast i8** [[TMP112]] to float** +// CHECK20-NEXT: store float* [[VLA]], float** [[TMP113]], align 4 +// CHECK20-NEXT: store i64 [[TMP91]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_sizes.10, i32 0, i32 3), align 4 +// CHECK20-NEXT: [[TMP114:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i32 0, i32 3 +// CHECK20-NEXT: store i8* null, i8** [[TMP114]], align 4 +// CHECK20-NEXT: [[TMP115:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 4 +// CHECK20-NEXT: [[TMP116:%.*]] = bitcast i8** [[TMP115]] to [5 x [10 x double]]** +// CHECK20-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP116]], align 4 +// CHECK20-NEXT: [[TMP117:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 4 +// CHECK20-NEXT: [[TMP118:%.*]] = bitcast i8** [[TMP117]] to [5 x [10 x double]]** +// CHECK20-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP118]], align 4 +// CHECK20-NEXT: [[TMP119:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i32 0, i32 4 +// CHECK20-NEXT: store i8* null, i8** [[TMP119]], align 4 +// CHECK20-NEXT: [[TMP120:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 5 +// CHECK20-NEXT: [[TMP121:%.*]] = bitcast i8** [[TMP120]] to i32* +// CHECK20-NEXT: store i32 5, i32* [[TMP121]], align 4 +// CHECK20-NEXT: [[TMP122:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 5 +// CHECK20-NEXT: [[TMP123:%.*]] = bitcast i8** [[TMP122]] to i32* +// CHECK20-NEXT: store i32 5, i32* [[TMP123]], align 4 +// CHECK20-NEXT: [[TMP124:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i32 0, i32 5 // CHECK20-NEXT: store i8* null, i8** [[TMP124]], align 4 -// CHECK20-NEXT: [[TMP125:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 5 +// CHECK20-NEXT: [[TMP125:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 6 // CHECK20-NEXT: [[TMP126:%.*]] = bitcast i8** [[TMP125]] to i32* -// CHECK20-NEXT: store i32 5, i32* [[TMP126]], align 4 -// CHECK20-NEXT: [[TMP127:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 5 +// CHECK20-NEXT: store i32 [[TMP3]], i32* [[TMP126]], align 4 +// CHECK20-NEXT: [[TMP127:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 6 // CHECK20-NEXT: [[TMP128:%.*]] = bitcast i8** [[TMP127]] to i32* -// CHECK20-NEXT: store i32 5, i32* [[TMP128]], align 4 -// CHECK20-NEXT: [[TMP129:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5 -// CHECK20-NEXT: store i64 4, i64* [[TMP129]], align 4 -// CHECK20-NEXT: [[TMP130:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i32 0, i32 5 -// CHECK20-NEXT: store i8* null, i8** [[TMP130]], align 4 -// CHECK20-NEXT: [[TMP131:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 6 -// CHECK20-NEXT: [[TMP132:%.*]] = bitcast i8** [[TMP131]] to i32* -// CHECK20-NEXT: store i32 [[TMP3]], i32* [[TMP132]], align 4 -// CHECK20-NEXT: [[TMP133:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 6 -// CHECK20-NEXT: [[TMP134:%.*]] = bitcast i8** [[TMP133]] to i32* -// CHECK20-NEXT: store i32 [[TMP3]], i32* [[TMP134]], align 4 -// CHECK20-NEXT: [[TMP135:%.*]] = 
getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6 -// CHECK20-NEXT: store i64 4, i64* [[TMP135]], align 4 -// CHECK20-NEXT: [[TMP136:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i32 0, i32 6 -// CHECK20-NEXT: store i8* null, i8** [[TMP136]], align 4 -// CHECK20-NEXT: [[TMP137:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 7 -// CHECK20-NEXT: [[TMP138:%.*]] = bitcast i8** [[TMP137]] to double** -// CHECK20-NEXT: store double* [[VLA1]], double** [[TMP138]], align 4 -// CHECK20-NEXT: [[TMP139:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 7 -// CHECK20-NEXT: [[TMP140:%.*]] = bitcast i8** [[TMP139]] to double** -// CHECK20-NEXT: store double* [[VLA1]], double** [[TMP140]], align 4 -// CHECK20-NEXT: [[TMP141:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7 -// CHECK20-NEXT: store i64 [[TMP94]], i64* [[TMP141]], align 4 -// CHECK20-NEXT: [[TMP142:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i32 0, i32 7 -// CHECK20-NEXT: store i8* null, i8** [[TMP142]], align 4 -// CHECK20-NEXT: [[TMP143:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 8 -// CHECK20-NEXT: [[TMP144:%.*]] = bitcast i8** [[TMP143]] to %struct.TT** -// CHECK20-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP144]], align 4 -// CHECK20-NEXT: [[TMP145:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 8 -// CHECK20-NEXT: [[TMP146:%.*]] = bitcast i8** [[TMP145]] to %struct.TT** -// CHECK20-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP146]], align 4 -// CHECK20-NEXT: [[TMP147:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 8 -// CHECK20-NEXT: store i64 12, i64* [[TMP147]], align 4 -// CHECK20-NEXT: [[TMP148:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i32 0, i32 8 -// CHECK20-NEXT: store i8* null, i8** [[TMP148]], align 4 -// CHECK20-NEXT: [[TMP149:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP150:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP151:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP152:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142.region_id, i32 9, i8** [[TMP149]], i8** [[TMP150]], i64* [[TMP151]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK20-NEXT: [[TMP153:%.*]] = icmp ne i32 [[TMP152]], 0 -// CHECK20-NEXT: br i1 [[TMP153]], label [[OMP_OFFLOAD_FAILED23:%.*]], label [[OMP_OFFLOAD_CONT24:%.*]] +// CHECK20-NEXT: store i32 [[TMP3]], i32* [[TMP128]], align 4 +// CHECK20-NEXT: [[TMP129:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i32 0, i32 6 +// CHECK20-NEXT: store i8* null, i8** [[TMP129]], align 4 +// CHECK20-NEXT: [[TMP130:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 7 +// CHECK20-NEXT: [[TMP131:%.*]] = bitcast i8** [[TMP130]] to double** +// CHECK20-NEXT: store double* [[VLA1]], double** [[TMP131]], align 4 +// CHECK20-NEXT: [[TMP132:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* 
[[DOTOFFLOAD_PTRS21]], i32 0, i32 7 +// CHECK20-NEXT: [[TMP133:%.*]] = bitcast i8** [[TMP132]] to double** +// CHECK20-NEXT: store double* [[VLA1]], double** [[TMP133]], align 4 +// CHECK20-NEXT: store i64 [[TMP94]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_sizes.10, i32 0, i32 7), align 4 +// CHECK20-NEXT: [[TMP134:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i32 0, i32 7 +// CHECK20-NEXT: store i8* null, i8** [[TMP134]], align 4 +// CHECK20-NEXT: [[TMP135:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 8 +// CHECK20-NEXT: [[TMP136:%.*]] = bitcast i8** [[TMP135]] to %struct.TT** +// CHECK20-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP136]], align 4 +// CHECK20-NEXT: [[TMP137:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 8 +// CHECK20-NEXT: [[TMP138:%.*]] = bitcast i8** [[TMP137]] to %struct.TT** +// CHECK20-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP138]], align 4 +// CHECK20-NEXT: [[TMP139:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i32 0, i32 8 +// CHECK20-NEXT: store i8* null, i8** [[TMP139]], align 4 +// CHECK20-NEXT: [[TMP140:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP141:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP142:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142.region_id, i32 9, i8** [[TMP140]], i8** [[TMP141]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_sizes.10, i32 0, i32 0), i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_maptypes.11, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK20-NEXT: [[TMP143:%.*]] = icmp ne i32 [[TMP142]], 0 +// CHECK20-NEXT: br i1 [[TMP143]], label [[OMP_OFFLOAD_FAILED23:%.*]], label [[OMP_OFFLOAD_CONT24:%.*]] // CHECK20: omp_offload.failed23: // CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142(i32 [[TMP88]], [10 x float]* [[B]], i32 [[TMP1]], float* [[VLA]], [5 x [10 x double]]* [[C]], i32 5, i32 [[TMP3]], double* [[VLA1]], %struct.TT* [[D]]) #[[ATTR3]] // CHECK20-NEXT: br label [[OMP_OFFLOAD_CONT24]] @@ -12281,50 +12060,50 @@ // CHECK20-NEXT: br label [[OMP_IF_END26]] // CHECK20: omp_if.end26: // CHECK20-NEXT: store i32 0, i32* [[NN]], align 4 -// CHECK20-NEXT: [[TMP154:%.*]] = load i32, i32* [[NN]], align 4 -// CHECK20-NEXT: store i32 [[TMP154]], i32* [[NN_CASTED]], align 4 -// CHECK20-NEXT: [[TMP155:%.*]] = load i32, i32* [[NN_CASTED]], align 4 -// CHECK20-NEXT: [[TMP156:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS27]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP157:%.*]] = bitcast i8** [[TMP156]] to i32* -// CHECK20-NEXT: store i32 [[TMP155]], i32* [[TMP157]], align 4 -// CHECK20-NEXT: [[TMP158:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS28]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP159:%.*]] = bitcast i8** [[TMP158]] to i32* -// CHECK20-NEXT: store i32 [[TMP155]], i32* [[TMP159]], align 4 -// CHECK20-NEXT: [[TMP160:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS29]], i32 0, i32 0 -// CHECK20-NEXT: store i8* null, i8** [[TMP160]], align 4 -// CHECK20-NEXT: [[TMP161:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS27]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP162:%.*]] = 
getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS28]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP163:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l154.region_id, i32 1, i8** [[TMP161]], i8** [[TMP162]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.13, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.14, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK20-NEXT: [[TMP164:%.*]] = icmp ne i32 [[TMP163]], 0 -// CHECK20-NEXT: br i1 [[TMP164]], label [[OMP_OFFLOAD_FAILED30:%.*]], label [[OMP_OFFLOAD_CONT31:%.*]] +// CHECK20-NEXT: [[TMP144:%.*]] = load i32, i32* [[NN]], align 4 +// CHECK20-NEXT: store i32 [[TMP144]], i32* [[NN_CASTED]], align 4 +// CHECK20-NEXT: [[TMP145:%.*]] = load i32, i32* [[NN_CASTED]], align 4 +// CHECK20-NEXT: [[TMP146:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS27]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP147:%.*]] = bitcast i8** [[TMP146]] to i32* +// CHECK20-NEXT: store i32 [[TMP145]], i32* [[TMP147]], align 4 +// CHECK20-NEXT: [[TMP148:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS28]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP149:%.*]] = bitcast i8** [[TMP148]] to i32* +// CHECK20-NEXT: store i32 [[TMP145]], i32* [[TMP149]], align 4 +// CHECK20-NEXT: [[TMP150:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS29]], i32 0, i32 0 +// CHECK20-NEXT: store i8* null, i8** [[TMP150]], align 4 +// CHECK20-NEXT: [[TMP151:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS27]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP152:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS28]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP153:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l154.region_id, i32 1, i8** [[TMP151]], i8** [[TMP152]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.14, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.15, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK20-NEXT: [[TMP154:%.*]] = icmp ne i32 [[TMP153]], 0 +// CHECK20-NEXT: br i1 [[TMP154]], label [[OMP_OFFLOAD_FAILED30:%.*]], label [[OMP_OFFLOAD_CONT31:%.*]] // CHECK20: omp_offload.failed30: -// CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l154(i32 [[TMP155]]) #[[ATTR3]] +// CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l154(i32 [[TMP145]]) #[[ATTR3]] // CHECK20-NEXT: br label [[OMP_OFFLOAD_CONT31]] // CHECK20: omp_offload.cont31: -// CHECK20-NEXT: [[TMP165:%.*]] = load i32, i32* [[NN]], align 4 -// CHECK20-NEXT: store i32 [[TMP165]], i32* [[NN_CASTED32]], align 4 -// CHECK20-NEXT: [[TMP166:%.*]] = load i32, i32* [[NN_CASTED32]], align 4 -// CHECK20-NEXT: [[TMP167:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP168:%.*]] = bitcast i8** [[TMP167]] to i32* -// CHECK20-NEXT: store i32 [[TMP166]], i32* [[TMP168]], align 4 -// CHECK20-NEXT: [[TMP169:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP170:%.*]] = bitcast i8** [[TMP169]] to i32* -// CHECK20-NEXT: store i32 [[TMP166]], i32* [[TMP170]], align 4 -// CHECK20-NEXT: [[TMP171:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS35]], i32 0, 
i32 0 -// CHECK20-NEXT: store i8* null, i8** [[TMP171]], align 4 -// CHECK20-NEXT: [[TMP172:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP173:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP174:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l157.region_id, i32 1, i8** [[TMP172]], i8** [[TMP173]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.17, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.18, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK20-NEXT: [[TMP175:%.*]] = icmp ne i32 [[TMP174]], 0 -// CHECK20-NEXT: br i1 [[TMP175]], label [[OMP_OFFLOAD_FAILED36:%.*]], label [[OMP_OFFLOAD_CONT37:%.*]] +// CHECK20-NEXT: [[TMP155:%.*]] = load i32, i32* [[NN]], align 4 +// CHECK20-NEXT: store i32 [[TMP155]], i32* [[NN_CASTED32]], align 4 +// CHECK20-NEXT: [[TMP156:%.*]] = load i32, i32* [[NN_CASTED32]], align 4 +// CHECK20-NEXT: [[TMP157:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP158:%.*]] = bitcast i8** [[TMP157]] to i32* +// CHECK20-NEXT: store i32 [[TMP156]], i32* [[TMP158]], align 4 +// CHECK20-NEXT: [[TMP159:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP160:%.*]] = bitcast i8** [[TMP159]] to i32* +// CHECK20-NEXT: store i32 [[TMP156]], i32* [[TMP160]], align 4 +// CHECK20-NEXT: [[TMP161:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_MAPPERS35]], i32 0, i32 0 +// CHECK20-NEXT: store i8* null, i8** [[TMP161]], align 4 +// CHECK20-NEXT: [[TMP162:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP163:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP164:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l157.region_id, i32 1, i8** [[TMP162]], i8** [[TMP163]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.18, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.19, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK20-NEXT: [[TMP165:%.*]] = icmp ne i32 [[TMP164]], 0 +// CHECK20-NEXT: br i1 [[TMP165]], label [[OMP_OFFLOAD_FAILED36:%.*]], label [[OMP_OFFLOAD_CONT37:%.*]] // CHECK20: omp_offload.failed36: -// CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l157(i32 [[TMP166]]) #[[ATTR3]] +// CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l157(i32 [[TMP156]]) #[[ATTR3]] // CHECK20-NEXT: br label [[OMP_OFFLOAD_CONT37]] // CHECK20: omp_offload.cont37: -// CHECK20-NEXT: [[TMP176:%.*]] = load i32, i32* [[A]], align 4 -// CHECK20-NEXT: [[TMP177:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK20-NEXT: call void @llvm.stackrestore(i8* [[TMP177]]) -// CHECK20-NEXT: ret i32 [[TMP176]] +// CHECK20-NEXT: [[TMP166:%.*]] = load i32, i32* [[A]], align 4 +// CHECK20-NEXT: [[TMP167:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK20-NEXT: call void @llvm.stackrestore(i8* [[TMP167]]) +// CHECK20-NEXT: ret i32 [[TMP166]] // // // CHECK20-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l101 @@ -12691,11 +12470,11 
@@ // CHECK20-NEXT: [[TMP0:%.*]] = load i32, i32* [[NN_ADDR]], align 4 // CHECK20-NEXT: store i32 [[TMP0]], i32* [[NN_CASTED]], align 4 // CHECK20-NEXT: [[TMP1:%.*]] = load i32, i32* [[NN_CASTED]], align 4 -// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP1]]) +// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32)* @.omp_outlined..12 to void (i32*, i32*, ...)*), i32 [[TMP1]]) // CHECK20-NEXT: ret void // // -// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..12 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[NN:%.*]]) #[[ATTR2]] { // CHECK20-NEXT: entry: // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -12708,11 +12487,11 @@ // CHECK20-NEXT: [[TMP0:%.*]] = load i32, i32* [[NN_ADDR]], align 4 // CHECK20-NEXT: store i32 [[TMP0]], i32* [[NN_CASTED]], align 4 // CHECK20-NEXT: [[TMP1:%.*]] = load i32, i32* [[NN_CASTED]], align 4 -// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32)* @.omp_outlined..12 to void (i32*, i32*, ...)*), i32 [[TMP1]]) +// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32)* @.omp_outlined..13 to void (i32*, i32*, ...)*), i32 [[TMP1]]) // CHECK20-NEXT: ret void // // -// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..12 +// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..13 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[NN:%.*]]) #[[ATTR2]] { // CHECK20-NEXT: entry: // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -12733,11 +12512,11 @@ // CHECK20-NEXT: [[TMP0:%.*]] = load i32, i32* [[NN_ADDR]], align 4 // CHECK20-NEXT: store i32 [[TMP0]], i32* [[NN_CASTED]], align 4 // CHECK20-NEXT: [[TMP1:%.*]] = load i32, i32* [[NN_CASTED]], align 4 -// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i32 [[TMP1]]) +// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i32 [[TMP1]]) // CHECK20-NEXT: ret void // // -// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..15 +// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..16 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[NN:%.*]]) #[[ATTR2]] { // CHECK20-NEXT: entry: // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -12746,11 +12525,11 @@ // CHECK20-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 4 // CHECK20-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 4 // CHECK20-NEXT: store i32 [[NN]], i32* [[NN_ADDR]], align 4 -// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i32* [[NN_ADDR]]) +// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined..17 to void (i32*, i32*, ...)*), i32* [[NN_ADDR]]) // CHECK20-NEXT: ret void // // -// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..16 +// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..17 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[NN:%.*]]) #[[ATTR2]] { // CHECK20-NEXT: entry: // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -12784,7 +12563,7 @@ // CHECK20-NEXT: store i8* null, i8** [[TMP5]], align 4 // CHECK20-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK20-NEXT: [[TMP7:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP8:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z6bazzzziPi_l182.region_id, i32 1, i8** [[TMP6]], i8** [[TMP7]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.20, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.21, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK20-NEXT: [[TMP8:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z6bazzzziPi_l182.region_id, i32 1, i8** [[TMP6]], i8** [[TMP7]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.21, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.22, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK20-NEXT: [[TMP9:%.*]] = icmp ne i32 [[TMP8]], 0 // CHECK20-NEXT: br i1 [[TMP9]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK20: omp_offload.failed: @@ -12800,11 +12579,11 @@ // CHECK20-NEXT: [[VLA_ADDR:%.*]] = alloca i32, align 4 // CHECK20-NEXT: store i32 [[VLA]], i32* [[VLA_ADDR]], align 4 // CHECK20-NEXT: [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4 -// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32)* @.omp_outlined..19 to void (i32*, i32*, ...)*), i32 [[TMP0]]) +// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32)* @.omp_outlined..20 to void (i32*, i32*, ...)*), i32 [[TMP0]]) // CHECK20-NEXT: ret void // // -// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..19 +// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..20 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[VLA:%.*]]) #[[ATTR2]] { // CHECK20-NEXT: entry: // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -12862,7 +12641,6 @@ // CHECK20-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 4 // CHECK20-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 4 // CHECK20-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 4 -// CHECK20-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 4 // CHECK20-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 // CHECK20-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 // CHECK20-NEXT: [[THIS1:%.*]] = load %struct.S1*, %struct.S1** [[THIS_ADDR]], align 4 @@ -12892,56 +12670,46 @@ // CHECK20-NEXT: [[TMP12:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK20-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to double** // CHECK20-NEXT: store double* [[A]], double** [[TMP13]], align 4 -// CHECK20-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK20-NEXT: store i64 8, i64* [[TMP14]], align 4 -// CHECK20-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK20-NEXT: store i8* null, i8** [[TMP15]], align 4 -// CHECK20-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK20-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32* -// CHECK20-NEXT: store i32 [[TMP5]], i32* [[TMP17]], align 4 -// CHECK20-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK20-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32* -// CHECK20-NEXT: store i32 [[TMP5]], i32* [[TMP19]], align 4 -// CHECK20-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK20-NEXT: store i64 4, i64* [[TMP20]], align 4 -// CHECK20-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK20-NEXT: store i8* null, i8** [[TMP21]], align 4 -// CHECK20-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK20-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK20-NEXT: store i8* null, i8** [[TMP14]], align 4 +// CHECK20-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK20-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i32* +// CHECK20-NEXT: store i32 [[TMP5]], i32* [[TMP16]], align 4 +// CHECK20-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK20-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32* +// CHECK20-NEXT: store i32 [[TMP5]], i32* [[TMP18]], 
align 4 +// CHECK20-NEXT: [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK20-NEXT: store i8* null, i8** [[TMP19]], align 4 +// CHECK20-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK20-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32* +// CHECK20-NEXT: store i32 2, i32* [[TMP21]], align 4 +// CHECK20-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK20-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i32* // CHECK20-NEXT: store i32 2, i32* [[TMP23]], align 4 -// CHECK20-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK20-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i32* -// CHECK20-NEXT: store i32 2, i32* [[TMP25]], align 4 -// CHECK20-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK20-NEXT: store i64 4, i64* [[TMP26]], align 4 -// CHECK20-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK20-NEXT: store i8* null, i8** [[TMP27]], align 4 -// CHECK20-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK20-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i32* -// CHECK20-NEXT: store i32 [[TMP1]], i32* [[TMP29]], align 4 -// CHECK20-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK20-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i32* -// CHECK20-NEXT: store i32 [[TMP1]], i32* [[TMP31]], align 4 -// CHECK20-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK20-NEXT: store i64 4, i64* [[TMP32]], align 4 -// CHECK20-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 -// CHECK20-NEXT: store i8* null, i8** [[TMP33]], align 4 -// CHECK20-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK20-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i16** -// CHECK20-NEXT: store i16* [[VLA]], i16** [[TMP35]], align 4 -// CHECK20-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK20-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i16** -// CHECK20-NEXT: store i16* [[VLA]], i16** [[TMP37]], align 4 -// CHECK20-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK20-NEXT: store i64 [[TMP9]], i64* [[TMP38]], align 4 -// CHECK20-NEXT: [[TMP39:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 -// CHECK20-NEXT: store i8* null, i8** [[TMP39]], align 4 -// CHECK20-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l227.region_id, i32 5, i8** [[TMP40]], i8** [[TMP41]], i64* [[TMP42]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.23, i32 0, 
i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK20-NEXT: [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0 -// CHECK20-NEXT: br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK20-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK20-NEXT: store i8* null, i8** [[TMP24]], align 4 +// CHECK20-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK20-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i32* +// CHECK20-NEXT: store i32 [[TMP1]], i32* [[TMP26]], align 4 +// CHECK20-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK20-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i32* +// CHECK20-NEXT: store i32 [[TMP1]], i32* [[TMP28]], align 4 +// CHECK20-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 +// CHECK20-NEXT: store i8* null, i8** [[TMP29]], align 4 +// CHECK20-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK20-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i16** +// CHECK20-NEXT: store i16* [[VLA]], i16** [[TMP31]], align 4 +// CHECK20-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK20-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i16** +// CHECK20-NEXT: store i16* [[VLA]], i16** [[TMP33]], align 4 +// CHECK20-NEXT: store i64 [[TMP9]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.24, i32 0, i32 4), align 4 +// CHECK20-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 +// CHECK20-NEXT: store i8* null, i8** [[TMP34]], align 4 +// CHECK20-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP37:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l227.region_id, i32 5, i8** [[TMP35]], i8** [[TMP36]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.24, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.25, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK20-NEXT: [[TMP38:%.*]] = icmp ne i32 [[TMP37]], 0 +// CHECK20-NEXT: br i1 [[TMP38]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK20: omp_offload.failed: // CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l227(%struct.S1* [[THIS1]], i32 [[TMP5]], i32 2, i32 [[TMP1]], i16* [[VLA]]) #[[ATTR3]] // CHECK20-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -12951,15 +12719,15 @@ // CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l227(%struct.S1* [[THIS1]], i32 [[TMP5]], i32 2, i32 [[TMP1]], i16* [[VLA]]) #[[ATTR3]] // CHECK20-NEXT: br label [[OMP_IF_END]] // CHECK20: omp_if.end: -// CHECK20-NEXT: [[TMP45:%.*]] = mul nsw i32 1, [[TMP1]] -// CHECK20-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP45]] +// CHECK20-NEXT: [[TMP39:%.*]] = mul nsw i32 1, [[TMP1]] +// CHECK20-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP39]] // CHECK20-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1 -// 
CHECK20-NEXT: [[TMP46:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2 -// CHECK20-NEXT: [[CONV:%.*]] = sext i16 [[TMP46]] to i32 -// CHECK20-NEXT: [[TMP47:%.*]] = load i32, i32* [[B]], align 4 -// CHECK20-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[TMP47]] -// CHECK20-NEXT: [[TMP48:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK20-NEXT: call void @llvm.stackrestore(i8* [[TMP48]]) +// CHECK20-NEXT: [[TMP40:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2 +// CHECK20-NEXT: [[CONV:%.*]] = sext i16 [[TMP40]] to i32 +// CHECK20-NEXT: [[TMP41:%.*]] = load i32, i32* [[B]], align 4 +// CHECK20-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[TMP41]] +// CHECK20-NEXT: [[TMP42:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK20-NEXT: call void @llvm.stackrestore(i8* [[TMP42]]) // CHECK20-NEXT: ret i32 [[ADD3]] // // @@ -13030,7 +12798,7 @@ // CHECK20-NEXT: store i8* null, i8** [[TMP26]], align 4 // CHECK20-NEXT: [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK20-NEXT: [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l209.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.25, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.26, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK20-NEXT: [[TMP29:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l209.region_id, i32 4, i8** [[TMP27]], i8** [[TMP28]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.27, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.28, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK20-NEXT: [[TMP30:%.*]] = icmp ne i32 [[TMP29]], 0 // CHECK20-NEXT: br i1 [[TMP30]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK20: omp_offload.failed: @@ -13098,7 +12866,7 @@ // CHECK20-NEXT: store i8* null, i8** [[TMP19]], align 4 // CHECK20-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK20-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l192.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.28, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.29, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK20-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l192.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.30, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.31, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK20-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0 // CHECK20-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK20: 
omp_offload.failed: @@ -13135,11 +12903,11 @@ // CHECK20-NEXT: [[TMP4:%.*]] = load i32, i32* [[B_ADDR]], align 4 // CHECK20-NEXT: store i32 [[TMP4]], i32* [[B_CASTED]], align 4 // CHECK20-NEXT: [[TMP5:%.*]] = load i32, i32* [[B_CASTED]], align 4 -// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*)* @.omp_outlined..22 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], i16* [[TMP3]]) +// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*)* @.omp_outlined..23 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], i16* [[TMP3]]) // CHECK20-NEXT: ret void // // -// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..22 +// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..23 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i32 noundef [[B:%.*]], i32 noundef [[VLA:%.*]], i32 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR2]] { // CHECK20-NEXT: entry: // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -13205,11 +12973,11 @@ // CHECK20-NEXT: [[CONV3:%.*]] = bitcast i32* [[AAA_CASTED]] to i8* // CHECK20-NEXT: store i8 [[TMP5]], i8* [[CONV3]], align 1 // CHECK20-NEXT: [[TMP6:%.*]] = load i32, i32* [[AAA_CASTED]], align 4 -// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, [10 x i32]*)* @.omp_outlined..24 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], [10 x i32]* [[TMP0]]) +// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, [10 x i32]*)* @.omp_outlined..26 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], [10 x i32]* [[TMP0]]) // CHECK20-NEXT: ret void // // -// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..24 +// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..26 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], i32 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] { // CHECK20-NEXT: entry: // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -13267,11 +13035,11 @@ // CHECK20-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16* // CHECK20-NEXT: store i16 [[TMP3]], i16* [[CONV1]], align 2 // CHECK20-NEXT: [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4 -// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..27 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]]) +// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB1]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..29 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]]) // CHECK20-NEXT: ret void // // -// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..27 +// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..29 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] { // CHECK20-NEXT: entry: // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 diff --git a/clang/test/OpenMP/target_teams_distribute_codegen.cpp b/clang/test/OpenMP/target_teams_distribute_codegen.cpp --- a/clang/test/OpenMP/target_teams_distribute_codegen.cpp +++ b/clang/test/OpenMP/target_teams_distribute_codegen.cpp @@ -333,7 +333,6 @@ // CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS29:%.*]] = alloca [10 x i8*], align 8 // CHECK1-NEXT: [[DOTOFFLOAD_PTRS30:%.*]] = alloca [10 x i8*], align 8 // CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS31:%.*]] = alloca [10 x i8*], align 8 -// CHECK1-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [10 x i64], align 8 // CHECK1-NEXT: [[_TMP32:%.*]] = alloca i32, align 4 // CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]]) // CHECK1-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 @@ -518,107 +517,88 @@ // CHECK1-NEXT: [[TMP100:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 0 // CHECK1-NEXT: [[TMP101:%.*]] = bitcast i8** [[TMP100]] to i64* // CHECK1-NEXT: store i64 [[TMP91]], i64* [[TMP101]], align 8 -// CHECK1-NEXT: [[TMP102:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK1-NEXT: store i64 4, i64* [[TMP102]], align 8 -// CHECK1-NEXT: [[TMP103:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 0 -// CHECK1-NEXT: store i8* null, i8** [[TMP103]], align 8 -// CHECK1-NEXT: [[TMP104:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 1 -// CHECK1-NEXT: [[TMP105:%.*]] = bitcast i8** [[TMP104]] to [10 x float]** -// CHECK1-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP105]], align 8 -// CHECK1-NEXT: [[TMP106:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 1 -// CHECK1-NEXT: [[TMP107:%.*]] = bitcast i8** [[TMP106]] to [10 x float]** -// CHECK1-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP107]], align 8 -// CHECK1-NEXT: [[TMP108:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK1-NEXT: store i64 40, i64* [[TMP108]], align 8 -// CHECK1-NEXT: [[TMP109:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 1 -// CHECK1-NEXT: store i8* null, i8** [[TMP109]], align 8 -// CHECK1-NEXT: [[TMP110:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 2 +// CHECK1-NEXT: [[TMP102:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 0 +// CHECK1-NEXT: store i8* null, i8** [[TMP102]], align 8 +// CHECK1-NEXT: [[TMP103:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 1 +// CHECK1-NEXT: [[TMP104:%.*]] = bitcast i8** [[TMP103]] to [10 x float]** +// CHECK1-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP104]], align 8 +// 
CHECK1-NEXT: [[TMP105:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 1 +// CHECK1-NEXT: [[TMP106:%.*]] = bitcast i8** [[TMP105]] to [10 x float]** +// CHECK1-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP106]], align 8 +// CHECK1-NEXT: [[TMP107:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 1 +// CHECK1-NEXT: store i8* null, i8** [[TMP107]], align 8 +// CHECK1-NEXT: [[TMP108:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 2 +// CHECK1-NEXT: [[TMP109:%.*]] = bitcast i8** [[TMP108]] to i64* +// CHECK1-NEXT: store i64 [[TMP2]], i64* [[TMP109]], align 8 +// CHECK1-NEXT: [[TMP110:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 2 // CHECK1-NEXT: [[TMP111:%.*]] = bitcast i8** [[TMP110]] to i64* // CHECK1-NEXT: store i64 [[TMP2]], i64* [[TMP111]], align 8 -// CHECK1-NEXT: [[TMP112:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 2 -// CHECK1-NEXT: [[TMP113:%.*]] = bitcast i8** [[TMP112]] to i64* -// CHECK1-NEXT: store i64 [[TMP2]], i64* [[TMP113]], align 8 -// CHECK1-NEXT: [[TMP114:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK1-NEXT: store i64 8, i64* [[TMP114]], align 8 -// CHECK1-NEXT: [[TMP115:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 2 -// CHECK1-NEXT: store i8* null, i8** [[TMP115]], align 8 -// CHECK1-NEXT: [[TMP116:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 3 -// CHECK1-NEXT: [[TMP117:%.*]] = bitcast i8** [[TMP116]] to float** -// CHECK1-NEXT: store float* [[VLA]], float** [[TMP117]], align 8 -// CHECK1-NEXT: [[TMP118:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 3 -// CHECK1-NEXT: [[TMP119:%.*]] = bitcast i8** [[TMP118]] to float** -// CHECK1-NEXT: store float* [[VLA]], float** [[TMP119]], align 8 -// CHECK1-NEXT: [[TMP120:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK1-NEXT: store i64 [[TMP95]], i64* [[TMP120]], align 8 -// CHECK1-NEXT: [[TMP121:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 3 -// CHECK1-NEXT: store i8* null, i8** [[TMP121]], align 8 -// CHECK1-NEXT: [[TMP122:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 4 -// CHECK1-NEXT: [[TMP123:%.*]] = bitcast i8** [[TMP122]] to [5 x [10 x double]]** -// CHECK1-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP123]], align 8 -// CHECK1-NEXT: [[TMP124:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 4 -// CHECK1-NEXT: [[TMP125:%.*]] = bitcast i8** [[TMP124]] to [5 x [10 x double]]** -// CHECK1-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP125]], align 8 -// CHECK1-NEXT: [[TMP126:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK1-NEXT: store i64 400, i64* [[TMP126]], align 8 -// CHECK1-NEXT: [[TMP127:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 4 +// CHECK1-NEXT: [[TMP112:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 2 +// CHECK1-NEXT: store i8* null, i8** [[TMP112]], align 8 +// CHECK1-NEXT: [[TMP113:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* 
[[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 3 +// CHECK1-NEXT: [[TMP114:%.*]] = bitcast i8** [[TMP113]] to float** +// CHECK1-NEXT: store float* [[VLA]], float** [[TMP114]], align 8 +// CHECK1-NEXT: [[TMP115:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 3 +// CHECK1-NEXT: [[TMP116:%.*]] = bitcast i8** [[TMP115]] to float** +// CHECK1-NEXT: store float* [[VLA]], float** [[TMP116]], align 8 +// CHECK1-NEXT: store i64 [[TMP95]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_sizes.10, i32 0, i32 3), align 8 +// CHECK1-NEXT: [[TMP117:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 3 +// CHECK1-NEXT: store i8* null, i8** [[TMP117]], align 8 +// CHECK1-NEXT: [[TMP118:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 4 +// CHECK1-NEXT: [[TMP119:%.*]] = bitcast i8** [[TMP118]] to [5 x [10 x double]]** +// CHECK1-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP119]], align 8 +// CHECK1-NEXT: [[TMP120:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 4 +// CHECK1-NEXT: [[TMP121:%.*]] = bitcast i8** [[TMP120]] to [5 x [10 x double]]** +// CHECK1-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP121]], align 8 +// CHECK1-NEXT: [[TMP122:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 4 +// CHECK1-NEXT: store i8* null, i8** [[TMP122]], align 8 +// CHECK1-NEXT: [[TMP123:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 5 +// CHECK1-NEXT: [[TMP124:%.*]] = bitcast i8** [[TMP123]] to i64* +// CHECK1-NEXT: store i64 5, i64* [[TMP124]], align 8 +// CHECK1-NEXT: [[TMP125:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 5 +// CHECK1-NEXT: [[TMP126:%.*]] = bitcast i8** [[TMP125]] to i64* +// CHECK1-NEXT: store i64 5, i64* [[TMP126]], align 8 +// CHECK1-NEXT: [[TMP127:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 5 // CHECK1-NEXT: store i8* null, i8** [[TMP127]], align 8 -// CHECK1-NEXT: [[TMP128:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 5 +// CHECK1-NEXT: [[TMP128:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 6 // CHECK1-NEXT: [[TMP129:%.*]] = bitcast i8** [[TMP128]] to i64* -// CHECK1-NEXT: store i64 5, i64* [[TMP129]], align 8 -// CHECK1-NEXT: [[TMP130:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 5 +// CHECK1-NEXT: store i64 [[TMP5]], i64* [[TMP129]], align 8 +// CHECK1-NEXT: [[TMP130:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 6 // CHECK1-NEXT: [[TMP131:%.*]] = bitcast i8** [[TMP130]] to i64* -// CHECK1-NEXT: store i64 5, i64* [[TMP131]], align 8 -// CHECK1-NEXT: [[TMP132:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5 -// CHECK1-NEXT: store i64 8, i64* [[TMP132]], align 8 -// CHECK1-NEXT: [[TMP133:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 5 -// CHECK1-NEXT: store i8* null, i8** [[TMP133]], align 8 -// CHECK1-NEXT: [[TMP134:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 6 -// CHECK1-NEXT: [[TMP135:%.*]] = bitcast i8** [[TMP134]] to i64* -// CHECK1-NEXT: store i64 [[TMP5]], i64* [[TMP135]], align 8 -// 
CHECK1-NEXT: [[TMP136:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 6 -// CHECK1-NEXT: [[TMP137:%.*]] = bitcast i8** [[TMP136]] to i64* -// CHECK1-NEXT: store i64 [[TMP5]], i64* [[TMP137]], align 8 -// CHECK1-NEXT: [[TMP138:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6 -// CHECK1-NEXT: store i64 8, i64* [[TMP138]], align 8 -// CHECK1-NEXT: [[TMP139:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 6 -// CHECK1-NEXT: store i8* null, i8** [[TMP139]], align 8 -// CHECK1-NEXT: [[TMP140:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 7 -// CHECK1-NEXT: [[TMP141:%.*]] = bitcast i8** [[TMP140]] to double** -// CHECK1-NEXT: store double* [[VLA1]], double** [[TMP141]], align 8 -// CHECK1-NEXT: [[TMP142:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 7 -// CHECK1-NEXT: [[TMP143:%.*]] = bitcast i8** [[TMP142]] to double** -// CHECK1-NEXT: store double* [[VLA1]], double** [[TMP143]], align 8 -// CHECK1-NEXT: [[TMP144:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7 -// CHECK1-NEXT: store i64 [[TMP97]], i64* [[TMP144]], align 8 -// CHECK1-NEXT: [[TMP145:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 7 -// CHECK1-NEXT: store i8* null, i8** [[TMP145]], align 8 -// CHECK1-NEXT: [[TMP146:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 8 -// CHECK1-NEXT: [[TMP147:%.*]] = bitcast i8** [[TMP146]] to %struct.TT** -// CHECK1-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP147]], align 8 -// CHECK1-NEXT: [[TMP148:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 8 -// CHECK1-NEXT: [[TMP149:%.*]] = bitcast i8** [[TMP148]] to %struct.TT** -// CHECK1-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP149]], align 8 -// CHECK1-NEXT: [[TMP150:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 8 -// CHECK1-NEXT: store i64 16, i64* [[TMP150]], align 8 -// CHECK1-NEXT: [[TMP151:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 8 -// CHECK1-NEXT: store i8* null, i8** [[TMP151]], align 8 -// CHECK1-NEXT: [[TMP152:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 9 -// CHECK1-NEXT: [[TMP153:%.*]] = bitcast i8** [[TMP152]] to i64* -// CHECK1-NEXT: store i64 [[TMP93]], i64* [[TMP153]], align 8 -// CHECK1-NEXT: [[TMP154:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 9 -// CHECK1-NEXT: [[TMP155:%.*]] = bitcast i8** [[TMP154]] to i64* -// CHECK1-NEXT: store i64 [[TMP93]], i64* [[TMP155]], align 8 -// CHECK1-NEXT: [[TMP156:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 9 -// CHECK1-NEXT: store i64 4, i64* [[TMP156]], align 8 -// CHECK1-NEXT: [[TMP157:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 9 -// CHECK1-NEXT: store i8* null, i8** [[TMP157]], align 8 -// CHECK1-NEXT: [[TMP158:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP159:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP160:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 +// 
CHECK1-NEXT: store i64 [[TMP5]], i64* [[TMP131]], align 8 +// CHECK1-NEXT: [[TMP132:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 6 +// CHECK1-NEXT: store i8* null, i8** [[TMP132]], align 8 +// CHECK1-NEXT: [[TMP133:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 7 +// CHECK1-NEXT: [[TMP134:%.*]] = bitcast i8** [[TMP133]] to double** +// CHECK1-NEXT: store double* [[VLA1]], double** [[TMP134]], align 8 +// CHECK1-NEXT: [[TMP135:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 7 +// CHECK1-NEXT: [[TMP136:%.*]] = bitcast i8** [[TMP135]] to double** +// CHECK1-NEXT: store double* [[VLA1]], double** [[TMP136]], align 8 +// CHECK1-NEXT: store i64 [[TMP97]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_sizes.10, i32 0, i32 7), align 8 +// CHECK1-NEXT: [[TMP137:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 7 +// CHECK1-NEXT: store i8* null, i8** [[TMP137]], align 8 +// CHECK1-NEXT: [[TMP138:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 8 +// CHECK1-NEXT: [[TMP139:%.*]] = bitcast i8** [[TMP138]] to %struct.TT** +// CHECK1-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP139]], align 8 +// CHECK1-NEXT: [[TMP140:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 8 +// CHECK1-NEXT: [[TMP141:%.*]] = bitcast i8** [[TMP140]] to %struct.TT** +// CHECK1-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP141]], align 8 +// CHECK1-NEXT: [[TMP142:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 8 +// CHECK1-NEXT: store i8* null, i8** [[TMP142]], align 8 +// CHECK1-NEXT: [[TMP143:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 9 +// CHECK1-NEXT: [[TMP144:%.*]] = bitcast i8** [[TMP143]] to i64* +// CHECK1-NEXT: store i64 [[TMP93]], i64* [[TMP144]], align 8 +// CHECK1-NEXT: [[TMP145:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 9 +// CHECK1-NEXT: [[TMP146:%.*]] = bitcast i8** [[TMP145]] to i64* +// CHECK1-NEXT: store i64 [[TMP93]], i64* [[TMP146]], align 8 +// CHECK1-NEXT: [[TMP147:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 9 +// CHECK1-NEXT: store i8* null, i8** [[TMP147]], align 8 +// CHECK1-NEXT: [[TMP148:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP149:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 0 // CHECK1-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK1-NEXT: [[TMP161:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l145.region_id, i32 10, i8** [[TMP158]], i8** [[TMP159]], i64* [[TMP160]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK1-NEXT: [[TMP162:%.*]] = icmp ne i32 [[TMP161]], 0 -// CHECK1-NEXT: br i1 [[TMP162]], label [[OMP_OFFLOAD_FAILED33:%.*]], label [[OMP_OFFLOAD_CONT34:%.*]] +// CHECK1-NEXT: [[TMP150:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l145.region_id, i32 10, i8** [[TMP148]], i8** [[TMP149]], i64* 
getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_sizes.10, i32 0, i32 0), i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_maptypes.11, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK1-NEXT: [[TMP151:%.*]] = icmp ne i32 [[TMP150]], 0 +// CHECK1-NEXT: br i1 [[TMP151]], label [[OMP_OFFLOAD_FAILED33:%.*]], label [[OMP_OFFLOAD_CONT34:%.*]] // CHECK1: omp_offload.failed33: // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l145(i64 [[TMP91]], [10 x float]* [[B]], i64 [[TMP2]], float* [[VLA]], [5 x [10 x double]]* [[C]], i64 5, i64 [[TMP5]], double* [[VLA1]], %struct.TT* [[D]], i64 [[TMP93]]) #[[ATTR3]] // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT34]] @@ -628,10 +608,10 @@ // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l145(i64 [[TMP91]], [10 x float]* [[B]], i64 [[TMP2]], float* [[VLA]], [5 x [10 x double]]* [[C]], i64 5, i64 [[TMP5]], double* [[VLA1]], %struct.TT* [[D]], i64 [[TMP93]]) #[[ATTR3]] // CHECK1-NEXT: br label [[OMP_IF_END36]] // CHECK1: omp_if.end36: -// CHECK1-NEXT: [[TMP163:%.*]] = load i32, i32* [[A]], align 4 -// CHECK1-NEXT: [[TMP164:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK1-NEXT: call void @llvm.stackrestore(i8* [[TMP164]]) -// CHECK1-NEXT: ret i32 [[TMP163]] +// CHECK1-NEXT: [[TMP152:%.*]] = load i32, i32* [[A]], align 4 +// CHECK1-NEXT: [[TMP153:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK1-NEXT: call void @llvm.stackrestore(i8* [[TMP153]]) +// CHECK1-NEXT: ret i32 [[TMP152]] // // // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l103 @@ -1326,7 +1306,6 @@ // CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 8 // CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 8 // CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 8 -// CHECK1-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 8 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK1-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 // CHECK1-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 @@ -1358,57 +1337,47 @@ // CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK1-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to double** // CHECK1-NEXT: store double* [[A]], double** [[TMP13]], align 8 -// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK1-NEXT: store i64 8, i64* [[TMP14]], align 8 -// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK1-NEXT: store i8* null, i8** [[TMP15]], align 8 -// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK1-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i64* -// CHECK1-NEXT: store i64 [[TMP6]], i64* [[TMP17]], align 8 -// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK1-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i64* -// CHECK1-NEXT: store i64 [[TMP6]], i64* [[TMP19]], align 8 -// CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK1-NEXT: store i64 4, i64* [[TMP20]], align 8 -// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK1-NEXT: store i8* null, i8** [[TMP21]], align 8 -// 
CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK1-NEXT: store i8* null, i8** [[TMP14]], align 8 +// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK1-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i64* +// CHECK1-NEXT: store i64 [[TMP6]], i64* [[TMP16]], align 8 +// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK1-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i64* +// CHECK1-NEXT: store i64 [[TMP6]], i64* [[TMP18]], align 8 +// CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK1-NEXT: store i8* null, i8** [[TMP19]], align 8 +// CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK1-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i64* +// CHECK1-NEXT: store i64 2, i64* [[TMP21]], align 8 +// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK1-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i64* // CHECK1-NEXT: store i64 2, i64* [[TMP23]], align 8 -// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK1-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i64* -// CHECK1-NEXT: store i64 2, i64* [[TMP25]], align 8 -// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK1-NEXT: store i64 8, i64* [[TMP26]], align 8 -// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK1-NEXT: store i8* null, i8** [[TMP27]], align 8 -// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK1-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i64* -// CHECK1-NEXT: store i64 [[TMP2]], i64* [[TMP29]], align 8 -// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK1-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i64* -// CHECK1-NEXT: store i64 [[TMP2]], i64* [[TMP31]], align 8 -// CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK1-NEXT: store i64 8, i64* [[TMP32]], align 8 -// CHECK1-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 -// CHECK1-NEXT: store i8* null, i8** [[TMP33]], align 8 -// CHECK1-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK1-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i16** -// CHECK1-NEXT: store i16* [[VLA]], i16** [[TMP35]], align 8 -// CHECK1-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK1-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i16** -// CHECK1-NEXT: store i16* [[VLA]], i16** [[TMP37]], align 8 -// CHECK1-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK1-NEXT: store i64 [[TMP9]], i64* [[TMP38]], align 8 -// CHECK1-NEXT: [[TMP39:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 
-// CHECK1-NEXT: store i8* null, i8** [[TMP39]], align 8 -// CHECK1-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK1-NEXT: store i8* null, i8** [[TMP24]], align 8 +// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK1-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i64* +// CHECK1-NEXT: store i64 [[TMP2]], i64* [[TMP26]], align 8 +// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK1-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i64* +// CHECK1-NEXT: store i64 [[TMP2]], i64* [[TMP28]], align 8 +// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 +// CHECK1-NEXT: store i8* null, i8** [[TMP29]], align 8 +// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK1-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i16** +// CHECK1-NEXT: store i16* [[VLA]], i16** [[TMP31]], align 8 +// CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK1-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i16** +// CHECK1-NEXT: store i16* [[VLA]], i16** [[TMP33]], align 8 +// CHECK1-NEXT: store i64 [[TMP9]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.13, i32 0, i32 4), align 8 +// CHECK1-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 +// CHECK1-NEXT: store i8* null, i8** [[TMP34]], align 8 +// CHECK1-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK1-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK1-NEXT: [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l218.region_id, i32 5, i8** [[TMP40]], i8** [[TMP41]], i64* [[TMP42]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK1-NEXT: [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0 -// CHECK1-NEXT: br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK1-NEXT: [[TMP37:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l218.region_id, i32 5, i8** [[TMP35]], i8** [[TMP36]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.13, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.14, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK1-NEXT: [[TMP38:%.*]] = icmp ne i32 [[TMP37]], 0 +// CHECK1-NEXT: br i1 [[TMP38]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK1: omp_offload.failed: // CHECK1-NEXT: call void 
@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l218(%struct.S1* [[THIS1]], i64 [[TMP6]], i64 2, i64 [[TMP2]], i16* [[VLA]]) #[[ATTR3]] // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -1418,15 +1387,15 @@ // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l218(%struct.S1* [[THIS1]], i64 [[TMP6]], i64 2, i64 [[TMP2]], i16* [[VLA]]) #[[ATTR3]] // CHECK1-NEXT: br label [[OMP_IF_END]] // CHECK1: omp_if.end: -// CHECK1-NEXT: [[TMP45:%.*]] = mul nsw i64 1, [[TMP2]] -// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP45]] +// CHECK1-NEXT: [[TMP39:%.*]] = mul nsw i64 1, [[TMP2]] +// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP39]] // CHECK1-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1 -// CHECK1-NEXT: [[TMP46:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2 -// CHECK1-NEXT: [[CONV3:%.*]] = sext i16 [[TMP46]] to i32 -// CHECK1-NEXT: [[TMP47:%.*]] = load i32, i32* [[B]], align 4 -// CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], [[TMP47]] -// CHECK1-NEXT: [[TMP48:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK1-NEXT: call void @llvm.stackrestore(i8* [[TMP48]]) +// CHECK1-NEXT: [[TMP40:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2 +// CHECK1-NEXT: [[CONV3:%.*]] = sext i16 [[TMP40]] to i32 +// CHECK1-NEXT: [[TMP41:%.*]] = load i32, i32* [[B]], align 4 +// CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], [[TMP41]] +// CHECK1-NEXT: [[TMP42:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK1-NEXT: call void @llvm.stackrestore(i8* [[TMP42]]) // CHECK1-NEXT: ret i32 [[ADD4]] // // @@ -1531,7 +1500,7 @@ // CHECK1-NEXT: [[ADD8:%.*]] = add i32 [[TMP40]], 1 // CHECK1-NEXT: [[TMP41:%.*]] = zext i32 [[ADD8]] to i64 // CHECK1-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP41]]) -// CHECK1-NEXT: [[TMP42:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l200.region_id, i32 5, i8** [[TMP34]], i8** [[TMP35]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.14, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.15, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK1-NEXT: [[TMP42:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l200.region_id, i32 5, i8** [[TMP34]], i8** [[TMP35]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.16, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK1-NEXT: [[TMP43:%.*]] = icmp ne i32 [[TMP42]], 0 // CHECK1-NEXT: br i1 [[TMP43]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK1: omp_offload.failed: @@ -1602,7 +1571,7 @@ // CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK1-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK1-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l183.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr 
inbounds ([3 x i64], [3 x i64]* @.offload_sizes.17, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.18, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK1-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l183.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.19, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.20, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK1-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0 // CHECK1-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK1: omp_offload.failed: @@ -1641,11 +1610,11 @@ // CHECK1-NEXT: [[CONV3:%.*]] = bitcast i64* [[B_CASTED]] to i32* // CHECK1-NEXT: store i32 [[TMP4]], i32* [[CONV3]], align 4 // CHECK1-NEXT: [[TMP5:%.*]] = load i64, i64* [[B_CASTED]], align 8 -// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], i16* [[TMP3]]) +// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*)* @.omp_outlined..12 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], i16* [[TMP3]]) // CHECK1-NEXT: ret void // // -// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..12 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i64 noundef [[B:%.*]], i64 noundef [[VLA:%.*]], i64 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR2]] { // CHECK1-NEXT: entry: // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -1772,11 +1741,11 @@ // CHECK1-NEXT: [[CONV7:%.*]] = bitcast i64* [[AAA_CASTED]] to i8* // CHECK1-NEXT: store i8 [[TMP7]], i8* [[CONV7]], align 1 // CHECK1-NEXT: [[TMP8:%.*]] = load i64, i64* [[AAA_CASTED]], align 8 -// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, [10 x i32]*)* @.omp_outlined..13 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], i64 [[TMP8]], [10 x i32]* [[TMP0]]) +// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, [10 x i32]*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], i64 [[TMP8]], [10 x i32]* [[TMP0]]) // CHECK1-NEXT: ret void // // -// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..13 +// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..15 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[N:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], i64 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] { // CHECK1-NEXT: entry: // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -1922,11 +1891,11 @@ // CHECK1-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16* // CHECK1-NEXT: store i16 [[TMP3]], i16* [[CONV3]], align 2 // CHECK1-NEXT: [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8 -// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]]) +// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]]) // CHECK1-NEXT: ret void // // -// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..16 +// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..18 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] { // CHECK1-NEXT: entry: // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -2053,7 +2022,6 @@ // CHECK2-NEXT: [[DOTOFFLOAD_BASEPTRS29:%.*]] = alloca [10 x i8*], align 8 // CHECK2-NEXT: [[DOTOFFLOAD_PTRS30:%.*]] = alloca [10 x i8*], align 8 // CHECK2-NEXT: [[DOTOFFLOAD_MAPPERS31:%.*]] = alloca [10 x i8*], align 8 -// CHECK2-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [10 x i64], align 8 // CHECK2-NEXT: [[_TMP32:%.*]] = alloca i32, align 4 // CHECK2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]]) // CHECK2-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 @@ -2238,107 +2206,88 @@ // CHECK2-NEXT: [[TMP100:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 0 // CHECK2-NEXT: [[TMP101:%.*]] = bitcast i8** [[TMP100]] to i64* // CHECK2-NEXT: store i64 [[TMP91]], i64* [[TMP101]], align 8 -// CHECK2-NEXT: [[TMP102:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK2-NEXT: store i64 4, i64* [[TMP102]], align 8 -// CHECK2-NEXT: [[TMP103:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 0 -// CHECK2-NEXT: store i8* null, i8** [[TMP103]], align 8 -// CHECK2-NEXT: [[TMP104:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 1 -// CHECK2-NEXT: [[TMP105:%.*]] = bitcast i8** [[TMP104]] to [10 x float]** -// CHECK2-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP105]], align 8 -// CHECK2-NEXT: [[TMP106:%.*]] = getelementptr inbounds [10 x 
i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 1 -// CHECK2-NEXT: [[TMP107:%.*]] = bitcast i8** [[TMP106]] to [10 x float]** -// CHECK2-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP107]], align 8 -// CHECK2-NEXT: [[TMP108:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK2-NEXT: store i64 40, i64* [[TMP108]], align 8 -// CHECK2-NEXT: [[TMP109:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 1 -// CHECK2-NEXT: store i8* null, i8** [[TMP109]], align 8 -// CHECK2-NEXT: [[TMP110:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 2 +// CHECK2-NEXT: [[TMP102:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 0 +// CHECK2-NEXT: store i8* null, i8** [[TMP102]], align 8 +// CHECK2-NEXT: [[TMP103:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 1 +// CHECK2-NEXT: [[TMP104:%.*]] = bitcast i8** [[TMP103]] to [10 x float]** +// CHECK2-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP104]], align 8 +// CHECK2-NEXT: [[TMP105:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 1 +// CHECK2-NEXT: [[TMP106:%.*]] = bitcast i8** [[TMP105]] to [10 x float]** +// CHECK2-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP106]], align 8 +// CHECK2-NEXT: [[TMP107:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 1 +// CHECK2-NEXT: store i8* null, i8** [[TMP107]], align 8 +// CHECK2-NEXT: [[TMP108:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 2 +// CHECK2-NEXT: [[TMP109:%.*]] = bitcast i8** [[TMP108]] to i64* +// CHECK2-NEXT: store i64 [[TMP2]], i64* [[TMP109]], align 8 +// CHECK2-NEXT: [[TMP110:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 2 // CHECK2-NEXT: [[TMP111:%.*]] = bitcast i8** [[TMP110]] to i64* // CHECK2-NEXT: store i64 [[TMP2]], i64* [[TMP111]], align 8 -// CHECK2-NEXT: [[TMP112:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 2 -// CHECK2-NEXT: [[TMP113:%.*]] = bitcast i8** [[TMP112]] to i64* -// CHECK2-NEXT: store i64 [[TMP2]], i64* [[TMP113]], align 8 -// CHECK2-NEXT: [[TMP114:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK2-NEXT: store i64 8, i64* [[TMP114]], align 8 -// CHECK2-NEXT: [[TMP115:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 2 -// CHECK2-NEXT: store i8* null, i8** [[TMP115]], align 8 -// CHECK2-NEXT: [[TMP116:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 3 -// CHECK2-NEXT: [[TMP117:%.*]] = bitcast i8** [[TMP116]] to float** -// CHECK2-NEXT: store float* [[VLA]], float** [[TMP117]], align 8 -// CHECK2-NEXT: [[TMP118:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 3 -// CHECK2-NEXT: [[TMP119:%.*]] = bitcast i8** [[TMP118]] to float** -// CHECK2-NEXT: store float* [[VLA]], float** [[TMP119]], align 8 -// CHECK2-NEXT: [[TMP120:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK2-NEXT: store i64 [[TMP95]], i64* [[TMP120]], align 8 -// CHECK2-NEXT: [[TMP121:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 3 -// CHECK2-NEXT: store i8* null, i8** [[TMP121]], align 8 -// CHECK2-NEXT: 
[[TMP122:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 4 -// CHECK2-NEXT: [[TMP123:%.*]] = bitcast i8** [[TMP122]] to [5 x [10 x double]]** -// CHECK2-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP123]], align 8 -// CHECK2-NEXT: [[TMP124:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 4 -// CHECK2-NEXT: [[TMP125:%.*]] = bitcast i8** [[TMP124]] to [5 x [10 x double]]** -// CHECK2-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP125]], align 8 -// CHECK2-NEXT: [[TMP126:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK2-NEXT: store i64 400, i64* [[TMP126]], align 8 -// CHECK2-NEXT: [[TMP127:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 4 +// CHECK2-NEXT: [[TMP112:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 2 +// CHECK2-NEXT: store i8* null, i8** [[TMP112]], align 8 +// CHECK2-NEXT: [[TMP113:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 3 +// CHECK2-NEXT: [[TMP114:%.*]] = bitcast i8** [[TMP113]] to float** +// CHECK2-NEXT: store float* [[VLA]], float** [[TMP114]], align 8 +// CHECK2-NEXT: [[TMP115:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 3 +// CHECK2-NEXT: [[TMP116:%.*]] = bitcast i8** [[TMP115]] to float** +// CHECK2-NEXT: store float* [[VLA]], float** [[TMP116]], align 8 +// CHECK2-NEXT: store i64 [[TMP95]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_sizes.10, i32 0, i32 3), align 8 +// CHECK2-NEXT: [[TMP117:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 3 +// CHECK2-NEXT: store i8* null, i8** [[TMP117]], align 8 +// CHECK2-NEXT: [[TMP118:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 4 +// CHECK2-NEXT: [[TMP119:%.*]] = bitcast i8** [[TMP118]] to [5 x [10 x double]]** +// CHECK2-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP119]], align 8 +// CHECK2-NEXT: [[TMP120:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 4 +// CHECK2-NEXT: [[TMP121:%.*]] = bitcast i8** [[TMP120]] to [5 x [10 x double]]** +// CHECK2-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP121]], align 8 +// CHECK2-NEXT: [[TMP122:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 4 +// CHECK2-NEXT: store i8* null, i8** [[TMP122]], align 8 +// CHECK2-NEXT: [[TMP123:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 5 +// CHECK2-NEXT: [[TMP124:%.*]] = bitcast i8** [[TMP123]] to i64* +// CHECK2-NEXT: store i64 5, i64* [[TMP124]], align 8 +// CHECK2-NEXT: [[TMP125:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 5 +// CHECK2-NEXT: [[TMP126:%.*]] = bitcast i8** [[TMP125]] to i64* +// CHECK2-NEXT: store i64 5, i64* [[TMP126]], align 8 +// CHECK2-NEXT: [[TMP127:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 5 // CHECK2-NEXT: store i8* null, i8** [[TMP127]], align 8 -// CHECK2-NEXT: [[TMP128:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 5 +// CHECK2-NEXT: [[TMP128:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 6 // CHECK2-NEXT: 
[[TMP129:%.*]] = bitcast i8** [[TMP128]] to i64* -// CHECK2-NEXT: store i64 5, i64* [[TMP129]], align 8 -// CHECK2-NEXT: [[TMP130:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 5 +// CHECK2-NEXT: store i64 [[TMP5]], i64* [[TMP129]], align 8 +// CHECK2-NEXT: [[TMP130:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 6 // CHECK2-NEXT: [[TMP131:%.*]] = bitcast i8** [[TMP130]] to i64* -// CHECK2-NEXT: store i64 5, i64* [[TMP131]], align 8 -// CHECK2-NEXT: [[TMP132:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5 -// CHECK2-NEXT: store i64 8, i64* [[TMP132]], align 8 -// CHECK2-NEXT: [[TMP133:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 5 -// CHECK2-NEXT: store i8* null, i8** [[TMP133]], align 8 -// CHECK2-NEXT: [[TMP134:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 6 -// CHECK2-NEXT: [[TMP135:%.*]] = bitcast i8** [[TMP134]] to i64* -// CHECK2-NEXT: store i64 [[TMP5]], i64* [[TMP135]], align 8 -// CHECK2-NEXT: [[TMP136:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 6 -// CHECK2-NEXT: [[TMP137:%.*]] = bitcast i8** [[TMP136]] to i64* -// CHECK2-NEXT: store i64 [[TMP5]], i64* [[TMP137]], align 8 -// CHECK2-NEXT: [[TMP138:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6 -// CHECK2-NEXT: store i64 8, i64* [[TMP138]], align 8 -// CHECK2-NEXT: [[TMP139:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 6 -// CHECK2-NEXT: store i8* null, i8** [[TMP139]], align 8 -// CHECK2-NEXT: [[TMP140:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 7 -// CHECK2-NEXT: [[TMP141:%.*]] = bitcast i8** [[TMP140]] to double** -// CHECK2-NEXT: store double* [[VLA1]], double** [[TMP141]], align 8 -// CHECK2-NEXT: [[TMP142:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 7 -// CHECK2-NEXT: [[TMP143:%.*]] = bitcast i8** [[TMP142]] to double** -// CHECK2-NEXT: store double* [[VLA1]], double** [[TMP143]], align 8 -// CHECK2-NEXT: [[TMP144:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7 -// CHECK2-NEXT: store i64 [[TMP97]], i64* [[TMP144]], align 8 -// CHECK2-NEXT: [[TMP145:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 7 -// CHECK2-NEXT: store i8* null, i8** [[TMP145]], align 8 -// CHECK2-NEXT: [[TMP146:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 8 -// CHECK2-NEXT: [[TMP147:%.*]] = bitcast i8** [[TMP146]] to %struct.TT** -// CHECK2-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP147]], align 8 -// CHECK2-NEXT: [[TMP148:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 8 -// CHECK2-NEXT: [[TMP149:%.*]] = bitcast i8** [[TMP148]] to %struct.TT** -// CHECK2-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP149]], align 8 -// CHECK2-NEXT: [[TMP150:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 8 -// CHECK2-NEXT: store i64 16, i64* [[TMP150]], align 8 -// CHECK2-NEXT: [[TMP151:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 8 -// CHECK2-NEXT: store i8* null, i8** [[TMP151]], align 8 -// CHECK2-NEXT: [[TMP152:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* 
[[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 9 -// CHECK2-NEXT: [[TMP153:%.*]] = bitcast i8** [[TMP152]] to i64* -// CHECK2-NEXT: store i64 [[TMP93]], i64* [[TMP153]], align 8 -// CHECK2-NEXT: [[TMP154:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 9 -// CHECK2-NEXT: [[TMP155:%.*]] = bitcast i8** [[TMP154]] to i64* -// CHECK2-NEXT: store i64 [[TMP93]], i64* [[TMP155]], align 8 -// CHECK2-NEXT: [[TMP156:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 9 -// CHECK2-NEXT: store i64 4, i64* [[TMP156]], align 8 -// CHECK2-NEXT: [[TMP157:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 9 -// CHECK2-NEXT: store i8* null, i8** [[TMP157]], align 8 -// CHECK2-NEXT: [[TMP158:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 0 -// CHECK2-NEXT: [[TMP159:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 0 -// CHECK2-NEXT: [[TMP160:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 +// CHECK2-NEXT: store i64 [[TMP5]], i64* [[TMP131]], align 8 +// CHECK2-NEXT: [[TMP132:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 6 +// CHECK2-NEXT: store i8* null, i8** [[TMP132]], align 8 +// CHECK2-NEXT: [[TMP133:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 7 +// CHECK2-NEXT: [[TMP134:%.*]] = bitcast i8** [[TMP133]] to double** +// CHECK2-NEXT: store double* [[VLA1]], double** [[TMP134]], align 8 +// CHECK2-NEXT: [[TMP135:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 7 +// CHECK2-NEXT: [[TMP136:%.*]] = bitcast i8** [[TMP135]] to double** +// CHECK2-NEXT: store double* [[VLA1]], double** [[TMP136]], align 8 +// CHECK2-NEXT: store i64 [[TMP97]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_sizes.10, i32 0, i32 7), align 8 +// CHECK2-NEXT: [[TMP137:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 7 +// CHECK2-NEXT: store i8* null, i8** [[TMP137]], align 8 +// CHECK2-NEXT: [[TMP138:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 8 +// CHECK2-NEXT: [[TMP139:%.*]] = bitcast i8** [[TMP138]] to %struct.TT** +// CHECK2-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP139]], align 8 +// CHECK2-NEXT: [[TMP140:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 8 +// CHECK2-NEXT: [[TMP141:%.*]] = bitcast i8** [[TMP140]] to %struct.TT** +// CHECK2-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP141]], align 8 +// CHECK2-NEXT: [[TMP142:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 8 +// CHECK2-NEXT: store i8* null, i8** [[TMP142]], align 8 +// CHECK2-NEXT: [[TMP143:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 9 +// CHECK2-NEXT: [[TMP144:%.*]] = bitcast i8** [[TMP143]] to i64* +// CHECK2-NEXT: store i64 [[TMP93]], i64* [[TMP144]], align 8 +// CHECK2-NEXT: [[TMP145:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 9 +// CHECK2-NEXT: [[TMP146:%.*]] = bitcast i8** [[TMP145]] to i64* +// CHECK2-NEXT: store i64 [[TMP93]], i64* [[TMP146]], align 8 +// CHECK2-NEXT: [[TMP147:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 9 +// CHECK2-NEXT: store i8* null, 
i8** [[TMP147]], align 8 +// CHECK2-NEXT: [[TMP148:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 0 +// CHECK2-NEXT: [[TMP149:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 0 // CHECK2-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK2-NEXT: [[TMP161:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l145.region_id, i32 10, i8** [[TMP158]], i8** [[TMP159]], i64* [[TMP160]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK2-NEXT: [[TMP162:%.*]] = icmp ne i32 [[TMP161]], 0 -// CHECK2-NEXT: br i1 [[TMP162]], label [[OMP_OFFLOAD_FAILED33:%.*]], label [[OMP_OFFLOAD_CONT34:%.*]] +// CHECK2-NEXT: [[TMP150:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l145.region_id, i32 10, i8** [[TMP148]], i8** [[TMP149]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_sizes.10, i32 0, i32 0), i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_maptypes.11, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK2-NEXT: [[TMP151:%.*]] = icmp ne i32 [[TMP150]], 0 +// CHECK2-NEXT: br i1 [[TMP151]], label [[OMP_OFFLOAD_FAILED33:%.*]], label [[OMP_OFFLOAD_CONT34:%.*]] // CHECK2: omp_offload.failed33: // CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l145(i64 [[TMP91]], [10 x float]* [[B]], i64 [[TMP2]], float* [[VLA]], [5 x [10 x double]]* [[C]], i64 5, i64 [[TMP5]], double* [[VLA1]], %struct.TT* [[D]], i64 [[TMP93]]) #[[ATTR3]] // CHECK2-NEXT: br label [[OMP_OFFLOAD_CONT34]] @@ -2348,10 +2297,10 @@ // CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l145(i64 [[TMP91]], [10 x float]* [[B]], i64 [[TMP2]], float* [[VLA]], [5 x [10 x double]]* [[C]], i64 5, i64 [[TMP5]], double* [[VLA1]], %struct.TT* [[D]], i64 [[TMP93]]) #[[ATTR3]] // CHECK2-NEXT: br label [[OMP_IF_END36]] // CHECK2: omp_if.end36: -// CHECK2-NEXT: [[TMP163:%.*]] = load i32, i32* [[A]], align 4 -// CHECK2-NEXT: [[TMP164:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK2-NEXT: call void @llvm.stackrestore(i8* [[TMP164]]) -// CHECK2-NEXT: ret i32 [[TMP163]] +// CHECK2-NEXT: [[TMP152:%.*]] = load i32, i32* [[A]], align 4 +// CHECK2-NEXT: [[TMP153:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK2-NEXT: call void @llvm.stackrestore(i8* [[TMP153]]) +// CHECK2-NEXT: ret i32 [[TMP152]] // // // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l103 @@ -3046,7 +2995,6 @@ // CHECK2-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 8 // CHECK2-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 8 // CHECK2-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 8 -// CHECK2-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 8 // CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK2-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 // CHECK2-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 @@ -3078,57 +3026,47 @@ // CHECK2-NEXT: [[TMP12:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK2-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to double** // CHECK2-NEXT: store double* [[A]], double** [[TMP13]], align 8 -// CHECK2-NEXT: [[TMP14:%.*]] = 
getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK2-NEXT: store i64 8, i64* [[TMP14]], align 8 -// CHECK2-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK2-NEXT: store i8* null, i8** [[TMP15]], align 8 -// CHECK2-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK2-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i64* -// CHECK2-NEXT: store i64 [[TMP6]], i64* [[TMP17]], align 8 -// CHECK2-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK2-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i64* -// CHECK2-NEXT: store i64 [[TMP6]], i64* [[TMP19]], align 8 -// CHECK2-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK2-NEXT: store i64 4, i64* [[TMP20]], align 8 -// CHECK2-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK2-NEXT: store i8* null, i8** [[TMP21]], align 8 -// CHECK2-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK2-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK2-NEXT: store i8* null, i8** [[TMP14]], align 8 +// CHECK2-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK2-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i64* +// CHECK2-NEXT: store i64 [[TMP6]], i64* [[TMP16]], align 8 +// CHECK2-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK2-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i64* +// CHECK2-NEXT: store i64 [[TMP6]], i64* [[TMP18]], align 8 +// CHECK2-NEXT: [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK2-NEXT: store i8* null, i8** [[TMP19]], align 8 +// CHECK2-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK2-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i64* +// CHECK2-NEXT: store i64 2, i64* [[TMP21]], align 8 +// CHECK2-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK2-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i64* // CHECK2-NEXT: store i64 2, i64* [[TMP23]], align 8 -// CHECK2-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK2-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i64* -// CHECK2-NEXT: store i64 2, i64* [[TMP25]], align 8 -// CHECK2-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK2-NEXT: store i64 8, i64* [[TMP26]], align 8 -// CHECK2-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK2-NEXT: store i8* null, i8** [[TMP27]], align 8 -// CHECK2-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK2-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i64* -// CHECK2-NEXT: store i64 [[TMP2]], i64* [[TMP29]], align 8 -// CHECK2-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK2-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i64* -// CHECK2-NEXT: store i64 [[TMP2]], i64* 
[[TMP31]], align 8 -// CHECK2-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK2-NEXT: store i64 8, i64* [[TMP32]], align 8 -// CHECK2-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 -// CHECK2-NEXT: store i8* null, i8** [[TMP33]], align 8 -// CHECK2-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK2-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i16** -// CHECK2-NEXT: store i16* [[VLA]], i16** [[TMP35]], align 8 -// CHECK2-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK2-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i16** -// CHECK2-NEXT: store i16* [[VLA]], i16** [[TMP37]], align 8 -// CHECK2-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK2-NEXT: store i64 [[TMP9]], i64* [[TMP38]], align 8 -// CHECK2-NEXT: [[TMP39:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 -// CHECK2-NEXT: store i8* null, i8** [[TMP39]], align 8 -// CHECK2-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK2-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK2-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 +// CHECK2-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK2-NEXT: store i8* null, i8** [[TMP24]], align 8 +// CHECK2-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK2-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i64* +// CHECK2-NEXT: store i64 [[TMP2]], i64* [[TMP26]], align 8 +// CHECK2-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK2-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i64* +// CHECK2-NEXT: store i64 [[TMP2]], i64* [[TMP28]], align 8 +// CHECK2-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 +// CHECK2-NEXT: store i8* null, i8** [[TMP29]], align 8 +// CHECK2-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK2-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i16** +// CHECK2-NEXT: store i16* [[VLA]], i16** [[TMP31]], align 8 +// CHECK2-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK2-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i16** +// CHECK2-NEXT: store i16* [[VLA]], i16** [[TMP33]], align 8 +// CHECK2-NEXT: store i64 [[TMP9]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.13, i32 0, i32 4), align 8 +// CHECK2-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 +// CHECK2-NEXT: store i8* null, i8** [[TMP34]], align 8 +// CHECK2-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK2-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK2-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK2-NEXT: [[TMP43:%.*]] = call i32 
@__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l218.region_id, i32 5, i8** [[TMP40]], i8** [[TMP41]], i64* [[TMP42]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK2-NEXT: [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0 -// CHECK2-NEXT: br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK2-NEXT: [[TMP37:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l218.region_id, i32 5, i8** [[TMP35]], i8** [[TMP36]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.13, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.14, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK2-NEXT: [[TMP38:%.*]] = icmp ne i32 [[TMP37]], 0 +// CHECK2-NEXT: br i1 [[TMP38]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK2: omp_offload.failed: // CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l218(%struct.S1* [[THIS1]], i64 [[TMP6]], i64 2, i64 [[TMP2]], i16* [[VLA]]) #[[ATTR3]] // CHECK2-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -3138,15 +3076,15 @@ // CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l218(%struct.S1* [[THIS1]], i64 [[TMP6]], i64 2, i64 [[TMP2]], i16* [[VLA]]) #[[ATTR3]] // CHECK2-NEXT: br label [[OMP_IF_END]] // CHECK2: omp_if.end: -// CHECK2-NEXT: [[TMP45:%.*]] = mul nsw i64 1, [[TMP2]] -// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP45]] +// CHECK2-NEXT: [[TMP39:%.*]] = mul nsw i64 1, [[TMP2]] +// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP39]] // CHECK2-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1 -// CHECK2-NEXT: [[TMP46:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2 -// CHECK2-NEXT: [[CONV3:%.*]] = sext i16 [[TMP46]] to i32 -// CHECK2-NEXT: [[TMP47:%.*]] = load i32, i32* [[B]], align 4 -// CHECK2-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], [[TMP47]] -// CHECK2-NEXT: [[TMP48:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK2-NEXT: call void @llvm.stackrestore(i8* [[TMP48]]) +// CHECK2-NEXT: [[TMP40:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2 +// CHECK2-NEXT: [[CONV3:%.*]] = sext i16 [[TMP40]] to i32 +// CHECK2-NEXT: [[TMP41:%.*]] = load i32, i32* [[B]], align 4 +// CHECK2-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], [[TMP41]] +// CHECK2-NEXT: [[TMP42:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK2-NEXT: call void @llvm.stackrestore(i8* [[TMP42]]) // CHECK2-NEXT: ret i32 [[ADD4]] // // @@ -3251,7 +3189,7 @@ // CHECK2-NEXT: [[ADD8:%.*]] = add i32 [[TMP40]], 1 // CHECK2-NEXT: [[TMP41:%.*]] = zext i32 [[ADD8]] to i64 // CHECK2-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP41]]) -// CHECK2-NEXT: [[TMP42:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l200.region_id, i32 5, i8** [[TMP34]], i8** [[TMP35]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.14, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.15, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK2-NEXT: [[TMP42:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 
-1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l200.region_id, i32 5, i8** [[TMP34]], i8** [[TMP35]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.16, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK2-NEXT: [[TMP43:%.*]] = icmp ne i32 [[TMP42]], 0 // CHECK2-NEXT: br i1 [[TMP43]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK2: omp_offload.failed: @@ -3322,7 +3260,7 @@ // CHECK2-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK2-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK2-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK2-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l183.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.17, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.18, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK2-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l183.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.19, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.20, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK2-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0 // CHECK2-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK2: omp_offload.failed: @@ -3361,11 +3299,11 @@ // CHECK2-NEXT: [[CONV3:%.*]] = bitcast i64* [[B_CASTED]] to i32* // CHECK2-NEXT: store i32 [[TMP4]], i32* [[CONV3]], align 4 // CHECK2-NEXT: [[TMP5:%.*]] = load i64, i64* [[B_CASTED]], align 8 -// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], i16* [[TMP3]]) +// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*)* @.omp_outlined..12 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], i16* [[TMP3]]) // CHECK2-NEXT: ret void // // -// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..12 // CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i64 noundef [[B:%.*]], i64 noundef [[VLA:%.*]], i64 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR2]] { // CHECK2-NEXT: entry: // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -3492,11 +3430,11 @@ // CHECK2-NEXT: [[CONV7:%.*]] = bitcast i64* [[AAA_CASTED]] to i8* // CHECK2-NEXT: store i8 [[TMP7]], i8* [[CONV7]], align 1 // CHECK2-NEXT: [[TMP8:%.*]] = load i64, i64* [[AAA_CASTED]], align 8 -// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, [10 x i32]*)* @.omp_outlined..13 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], i64 [[TMP8]], [10 x i32]* [[TMP0]]) +// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, [10 x i32]*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], i64 [[TMP8]], [10 x i32]* [[TMP0]]) // CHECK2-NEXT: ret void // // -// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..13 +// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..15 // CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[N:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], i64 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] { // CHECK2-NEXT: entry: // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -3642,11 +3580,11 @@ // CHECK2-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16* // CHECK2-NEXT: store i16 [[TMP3]], i16* [[CONV3]], align 2 // CHECK2-NEXT: [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8 -// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]]) +// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]]) // CHECK2-NEXT: ret void // // -// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..16 +// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..18 // CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] { // CHECK2-NEXT: entry: // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -3773,7 +3711,6 @@ // CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS23:%.*]] = alloca [10 x i8*], align 4 // CHECK3-NEXT: [[DOTOFFLOAD_PTRS24:%.*]] = alloca [10 x i8*], align 4 // CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS25:%.*]] = alloca [10 x i8*], align 4 -// CHECK3-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [10 x i64], align 4 // CHECK3-NEXT: [[_TMP26:%.*]] = alloca i32, align 4 // CHECK3-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]]) // CHECK3-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 @@ -3952,107 +3889,88 @@ // CHECK3-NEXT: [[TMP100:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS24]], i32 0, i32 0 // CHECK3-NEXT: [[TMP101:%.*]] = bitcast i8** [[TMP100]] to i32* // CHECK3-NEXT: store i32 [[TMP89]], i32* [[TMP101]], align 4 -// CHECK3-NEXT: [[TMP102:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK3-NEXT: store i64 4, i64* [[TMP102]], align 4 -// CHECK3-NEXT: [[TMP103:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS25]], i32 0, i32 0 -// CHECK3-NEXT: store i8* null, i8** [[TMP103]], align 4 -// CHECK3-NEXT: [[TMP104:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 1 -// CHECK3-NEXT: [[TMP105:%.*]] = bitcast i8** [[TMP104]] to [10 x float]** -// CHECK3-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP105]], align 4 -// CHECK3-NEXT: [[TMP106:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS24]], i32 0, i32 1 -// CHECK3-NEXT: [[TMP107:%.*]] = bitcast i8** [[TMP106]] to [10 x float]** -// CHECK3-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP107]], align 4 -// CHECK3-NEXT: [[TMP108:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK3-NEXT: store i64 40, i64* [[TMP108]], align 4 -// CHECK3-NEXT: [[TMP109:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS25]], i32 0, i32 1 -// CHECK3-NEXT: store i8* null, i8** [[TMP109]], align 4 -// CHECK3-NEXT: [[TMP110:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 2 +// CHECK3-NEXT: [[TMP102:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS25]], i32 0, i32 0 +// CHECK3-NEXT: store i8* null, i8** [[TMP102]], align 4 +// CHECK3-NEXT: [[TMP103:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 1 +// CHECK3-NEXT: [[TMP104:%.*]] = bitcast i8** [[TMP103]] to [10 x float]** +// CHECK3-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP104]], align 4 +// CHECK3-NEXT: [[TMP105:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS24]], i32 0, i32 1 +// CHECK3-NEXT: [[TMP106:%.*]] = bitcast i8** [[TMP105]] to [10 x float]** +// CHECK3-NEXT: store [10 x float]* [[B]], [10 x 
float]** [[TMP106]], align 4 +// CHECK3-NEXT: [[TMP107:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS25]], i32 0, i32 1 +// CHECK3-NEXT: store i8* null, i8** [[TMP107]], align 4 +// CHECK3-NEXT: [[TMP108:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 2 +// CHECK3-NEXT: [[TMP109:%.*]] = bitcast i8** [[TMP108]] to i32* +// CHECK3-NEXT: store i32 [[TMP1]], i32* [[TMP109]], align 4 +// CHECK3-NEXT: [[TMP110:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS24]], i32 0, i32 2 // CHECK3-NEXT: [[TMP111:%.*]] = bitcast i8** [[TMP110]] to i32* // CHECK3-NEXT: store i32 [[TMP1]], i32* [[TMP111]], align 4 -// CHECK3-NEXT: [[TMP112:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS24]], i32 0, i32 2 -// CHECK3-NEXT: [[TMP113:%.*]] = bitcast i8** [[TMP112]] to i32* -// CHECK3-NEXT: store i32 [[TMP1]], i32* [[TMP113]], align 4 -// CHECK3-NEXT: [[TMP114:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK3-NEXT: store i64 4, i64* [[TMP114]], align 4 -// CHECK3-NEXT: [[TMP115:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS25]], i32 0, i32 2 -// CHECK3-NEXT: store i8* null, i8** [[TMP115]], align 4 -// CHECK3-NEXT: [[TMP116:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 3 -// CHECK3-NEXT: [[TMP117:%.*]] = bitcast i8** [[TMP116]] to float** -// CHECK3-NEXT: store float* [[VLA]], float** [[TMP117]], align 4 -// CHECK3-NEXT: [[TMP118:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS24]], i32 0, i32 3 -// CHECK3-NEXT: [[TMP119:%.*]] = bitcast i8** [[TMP118]] to float** -// CHECK3-NEXT: store float* [[VLA]], float** [[TMP119]], align 4 -// CHECK3-NEXT: [[TMP120:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK3-NEXT: store i64 [[TMP94]], i64* [[TMP120]], align 4 -// CHECK3-NEXT: [[TMP121:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS25]], i32 0, i32 3 -// CHECK3-NEXT: store i8* null, i8** [[TMP121]], align 4 -// CHECK3-NEXT: [[TMP122:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 4 -// CHECK3-NEXT: [[TMP123:%.*]] = bitcast i8** [[TMP122]] to [5 x [10 x double]]** -// CHECK3-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP123]], align 4 -// CHECK3-NEXT: [[TMP124:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS24]], i32 0, i32 4 -// CHECK3-NEXT: [[TMP125:%.*]] = bitcast i8** [[TMP124]] to [5 x [10 x double]]** -// CHECK3-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP125]], align 4 -// CHECK3-NEXT: [[TMP126:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK3-NEXT: store i64 400, i64* [[TMP126]], align 4 -// CHECK3-NEXT: [[TMP127:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS25]], i32 0, i32 4 +// CHECK3-NEXT: [[TMP112:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS25]], i32 0, i32 2 +// CHECK3-NEXT: store i8* null, i8** [[TMP112]], align 4 +// CHECK3-NEXT: [[TMP113:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 3 +// CHECK3-NEXT: [[TMP114:%.*]] = bitcast i8** [[TMP113]] to float** +// CHECK3-NEXT: store float* [[VLA]], float** [[TMP114]], align 4 +// CHECK3-NEXT: [[TMP115:%.*]] = getelementptr inbounds [10 x i8*], [10 x 
i8*]* [[DOTOFFLOAD_PTRS24]], i32 0, i32 3 +// CHECK3-NEXT: [[TMP116:%.*]] = bitcast i8** [[TMP115]] to float** +// CHECK3-NEXT: store float* [[VLA]], float** [[TMP116]], align 4 +// CHECK3-NEXT: store i64 [[TMP94]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_sizes.10, i32 0, i32 3), align 4 +// CHECK3-NEXT: [[TMP117:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS25]], i32 0, i32 3 +// CHECK3-NEXT: store i8* null, i8** [[TMP117]], align 4 +// CHECK3-NEXT: [[TMP118:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 4 +// CHECK3-NEXT: [[TMP119:%.*]] = bitcast i8** [[TMP118]] to [5 x [10 x double]]** +// CHECK3-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP119]], align 4 +// CHECK3-NEXT: [[TMP120:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS24]], i32 0, i32 4 +// CHECK3-NEXT: [[TMP121:%.*]] = bitcast i8** [[TMP120]] to [5 x [10 x double]]** +// CHECK3-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP121]], align 4 +// CHECK3-NEXT: [[TMP122:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS25]], i32 0, i32 4 +// CHECK3-NEXT: store i8* null, i8** [[TMP122]], align 4 +// CHECK3-NEXT: [[TMP123:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 5 +// CHECK3-NEXT: [[TMP124:%.*]] = bitcast i8** [[TMP123]] to i32* +// CHECK3-NEXT: store i32 5, i32* [[TMP124]], align 4 +// CHECK3-NEXT: [[TMP125:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS24]], i32 0, i32 5 +// CHECK3-NEXT: [[TMP126:%.*]] = bitcast i8** [[TMP125]] to i32* +// CHECK3-NEXT: store i32 5, i32* [[TMP126]], align 4 +// CHECK3-NEXT: [[TMP127:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS25]], i32 0, i32 5 // CHECK3-NEXT: store i8* null, i8** [[TMP127]], align 4 -// CHECK3-NEXT: [[TMP128:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 5 +// CHECK3-NEXT: [[TMP128:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 6 // CHECK3-NEXT: [[TMP129:%.*]] = bitcast i8** [[TMP128]] to i32* -// CHECK3-NEXT: store i32 5, i32* [[TMP129]], align 4 -// CHECK3-NEXT: [[TMP130:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS24]], i32 0, i32 5 +// CHECK3-NEXT: store i32 [[TMP3]], i32* [[TMP129]], align 4 +// CHECK3-NEXT: [[TMP130:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS24]], i32 0, i32 6 // CHECK3-NEXT: [[TMP131:%.*]] = bitcast i8** [[TMP130]] to i32* -// CHECK3-NEXT: store i32 5, i32* [[TMP131]], align 4 -// CHECK3-NEXT: [[TMP132:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5 -// CHECK3-NEXT: store i64 4, i64* [[TMP132]], align 4 -// CHECK3-NEXT: [[TMP133:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS25]], i32 0, i32 5 -// CHECK3-NEXT: store i8* null, i8** [[TMP133]], align 4 -// CHECK3-NEXT: [[TMP134:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 6 -// CHECK3-NEXT: [[TMP135:%.*]] = bitcast i8** [[TMP134]] to i32* -// CHECK3-NEXT: store i32 [[TMP3]], i32* [[TMP135]], align 4 -// CHECK3-NEXT: [[TMP136:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS24]], i32 0, i32 6 -// CHECK3-NEXT: [[TMP137:%.*]] = bitcast i8** [[TMP136]] to i32* -// CHECK3-NEXT: store i32 [[TMP3]], i32* [[TMP137]], align 4 -// CHECK3-NEXT: 
[[TMP138:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6 -// CHECK3-NEXT: store i64 4, i64* [[TMP138]], align 4 -// CHECK3-NEXT: [[TMP139:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS25]], i32 0, i32 6 -// CHECK3-NEXT: store i8* null, i8** [[TMP139]], align 4 -// CHECK3-NEXT: [[TMP140:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 7 -// CHECK3-NEXT: [[TMP141:%.*]] = bitcast i8** [[TMP140]] to double** -// CHECK3-NEXT: store double* [[VLA1]], double** [[TMP141]], align 4 -// CHECK3-NEXT: [[TMP142:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS24]], i32 0, i32 7 -// CHECK3-NEXT: [[TMP143:%.*]] = bitcast i8** [[TMP142]] to double** -// CHECK3-NEXT: store double* [[VLA1]], double** [[TMP143]], align 4 -// CHECK3-NEXT: [[TMP144:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7 -// CHECK3-NEXT: store i64 [[TMP97]], i64* [[TMP144]], align 4 -// CHECK3-NEXT: [[TMP145:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS25]], i32 0, i32 7 -// CHECK3-NEXT: store i8* null, i8** [[TMP145]], align 4 -// CHECK3-NEXT: [[TMP146:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 8 -// CHECK3-NEXT: [[TMP147:%.*]] = bitcast i8** [[TMP146]] to %struct.TT** -// CHECK3-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP147]], align 4 -// CHECK3-NEXT: [[TMP148:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS24]], i32 0, i32 8 -// CHECK3-NEXT: [[TMP149:%.*]] = bitcast i8** [[TMP148]] to %struct.TT** -// CHECK3-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP149]], align 4 -// CHECK3-NEXT: [[TMP150:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 8 -// CHECK3-NEXT: store i64 12, i64* [[TMP150]], align 4 -// CHECK3-NEXT: [[TMP151:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS25]], i32 0, i32 8 -// CHECK3-NEXT: store i8* null, i8** [[TMP151]], align 4 -// CHECK3-NEXT: [[TMP152:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 9 -// CHECK3-NEXT: [[TMP153:%.*]] = bitcast i8** [[TMP152]] to i32* -// CHECK3-NEXT: store i32 [[TMP91]], i32* [[TMP153]], align 4 -// CHECK3-NEXT: [[TMP154:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS24]], i32 0, i32 9 -// CHECK3-NEXT: [[TMP155:%.*]] = bitcast i8** [[TMP154]] to i32* -// CHECK3-NEXT: store i32 [[TMP91]], i32* [[TMP155]], align 4 -// CHECK3-NEXT: [[TMP156:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 9 -// CHECK3-NEXT: store i64 4, i64* [[TMP156]], align 4 -// CHECK3-NEXT: [[TMP157:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS25]], i32 0, i32 9 -// CHECK3-NEXT: store i8* null, i8** [[TMP157]], align 4 -// CHECK3-NEXT: [[TMP158:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP159:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS24]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP160:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 +// CHECK3-NEXT: store i32 [[TMP3]], i32* [[TMP131]], align 4 +// CHECK3-NEXT: [[TMP132:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS25]], i32 0, i32 6 +// CHECK3-NEXT: store i8* null, i8** [[TMP132]], align 4 +// CHECK3-NEXT: 
[[TMP133:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 7 +// CHECK3-NEXT: [[TMP134:%.*]] = bitcast i8** [[TMP133]] to double** +// CHECK3-NEXT: store double* [[VLA1]], double** [[TMP134]], align 4 +// CHECK3-NEXT: [[TMP135:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS24]], i32 0, i32 7 +// CHECK3-NEXT: [[TMP136:%.*]] = bitcast i8** [[TMP135]] to double** +// CHECK3-NEXT: store double* [[VLA1]], double** [[TMP136]], align 4 +// CHECK3-NEXT: store i64 [[TMP97]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_sizes.10, i32 0, i32 7), align 4 +// CHECK3-NEXT: [[TMP137:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS25]], i32 0, i32 7 +// CHECK3-NEXT: store i8* null, i8** [[TMP137]], align 4 +// CHECK3-NEXT: [[TMP138:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 8 +// CHECK3-NEXT: [[TMP139:%.*]] = bitcast i8** [[TMP138]] to %struct.TT** +// CHECK3-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP139]], align 4 +// CHECK3-NEXT: [[TMP140:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS24]], i32 0, i32 8 +// CHECK3-NEXT: [[TMP141:%.*]] = bitcast i8** [[TMP140]] to %struct.TT** +// CHECK3-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP141]], align 4 +// CHECK3-NEXT: [[TMP142:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS25]], i32 0, i32 8 +// CHECK3-NEXT: store i8* null, i8** [[TMP142]], align 4 +// CHECK3-NEXT: [[TMP143:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 9 +// CHECK3-NEXT: [[TMP144:%.*]] = bitcast i8** [[TMP143]] to i32* +// CHECK3-NEXT: store i32 [[TMP91]], i32* [[TMP144]], align 4 +// CHECK3-NEXT: [[TMP145:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS24]], i32 0, i32 9 +// CHECK3-NEXT: [[TMP146:%.*]] = bitcast i8** [[TMP145]] to i32* +// CHECK3-NEXT: store i32 [[TMP91]], i32* [[TMP146]], align 4 +// CHECK3-NEXT: [[TMP147:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS25]], i32 0, i32 9 +// CHECK3-NEXT: store i8* null, i8** [[TMP147]], align 4 +// CHECK3-NEXT: [[TMP148:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP149:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS24]], i32 0, i32 0 // CHECK3-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK3-NEXT: [[TMP161:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l145.region_id, i32 10, i8** [[TMP158]], i8** [[TMP159]], i64* [[TMP160]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK3-NEXT: [[TMP162:%.*]] = icmp ne i32 [[TMP161]], 0 -// CHECK3-NEXT: br i1 [[TMP162]], label [[OMP_OFFLOAD_FAILED27:%.*]], label [[OMP_OFFLOAD_CONT28:%.*]] +// CHECK3-NEXT: [[TMP150:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l145.region_id, i32 10, i8** [[TMP148]], i8** [[TMP149]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_sizes.10, i32 0, i32 0), i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_maptypes.11, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK3-NEXT: [[TMP151:%.*]] = icmp ne 
i32 [[TMP150]], 0 +// CHECK3-NEXT: br i1 [[TMP151]], label [[OMP_OFFLOAD_FAILED27:%.*]], label [[OMP_OFFLOAD_CONT28:%.*]] // CHECK3: omp_offload.failed27: // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l145(i32 [[TMP89]], [10 x float]* [[B]], i32 [[TMP1]], float* [[VLA]], [5 x [10 x double]]* [[C]], i32 5, i32 [[TMP3]], double* [[VLA1]], %struct.TT* [[D]], i32 [[TMP91]]) #[[ATTR3]] // CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT28]] @@ -4062,10 +3980,10 @@ // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l145(i32 [[TMP89]], [10 x float]* [[B]], i32 [[TMP1]], float* [[VLA]], [5 x [10 x double]]* [[C]], i32 5, i32 [[TMP3]], double* [[VLA1]], %struct.TT* [[D]], i32 [[TMP91]]) #[[ATTR3]] // CHECK3-NEXT: br label [[OMP_IF_END30]] // CHECK3: omp_if.end30: -// CHECK3-NEXT: [[TMP163:%.*]] = load i32, i32* [[A]], align 4 -// CHECK3-NEXT: [[TMP164:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK3-NEXT: call void @llvm.stackrestore(i8* [[TMP164]]) -// CHECK3-NEXT: ret i32 [[TMP163]] +// CHECK3-NEXT: [[TMP152:%.*]] = load i32, i32* [[A]], align 4 +// CHECK3-NEXT: [[TMP153:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK3-NEXT: call void @llvm.stackrestore(i8* [[TMP153]]) +// CHECK3-NEXT: ret i32 [[TMP152]] // // // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l103 @@ -4744,7 +4662,6 @@ // CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 4 // CHECK3-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 4 // CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 4 -// CHECK3-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 4 // CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK3-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 // CHECK3-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 @@ -4775,57 +4692,47 @@ // CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK3-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to double** // CHECK3-NEXT: store double* [[A]], double** [[TMP13]], align 4 -// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK3-NEXT: store i64 8, i64* [[TMP14]], align 4 -// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK3-NEXT: store i8* null, i8** [[TMP15]], align 4 -// CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK3-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32* -// CHECK3-NEXT: store i32 [[TMP5]], i32* [[TMP17]], align 4 -// CHECK3-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK3-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32* -// CHECK3-NEXT: store i32 [[TMP5]], i32* [[TMP19]], align 4 -// CHECK3-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK3-NEXT: store i64 4, i64* [[TMP20]], align 4 -// CHECK3-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK3-NEXT: store i8* null, i8** [[TMP21]], align 4 -// CHECK3-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK3-NEXT: store i8* 
null, i8** [[TMP14]], align 4 +// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK3-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i32* +// CHECK3-NEXT: store i32 [[TMP5]], i32* [[TMP16]], align 4 +// CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK3-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32* +// CHECK3-NEXT: store i32 [[TMP5]], i32* [[TMP18]], align 4 +// CHECK3-NEXT: [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK3-NEXT: store i8* null, i8** [[TMP19]], align 4 +// CHECK3-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK3-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32* +// CHECK3-NEXT: store i32 2, i32* [[TMP21]], align 4 +// CHECK3-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK3-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i32* // CHECK3-NEXT: store i32 2, i32* [[TMP23]], align 4 -// CHECK3-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK3-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i32* -// CHECK3-NEXT: store i32 2, i32* [[TMP25]], align 4 -// CHECK3-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK3-NEXT: store i64 4, i64* [[TMP26]], align 4 -// CHECK3-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK3-NEXT: store i8* null, i8** [[TMP27]], align 4 -// CHECK3-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK3-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i32* -// CHECK3-NEXT: store i32 [[TMP1]], i32* [[TMP29]], align 4 -// CHECK3-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK3-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i32* -// CHECK3-NEXT: store i32 [[TMP1]], i32* [[TMP31]], align 4 -// CHECK3-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK3-NEXT: store i64 4, i64* [[TMP32]], align 4 -// CHECK3-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 -// CHECK3-NEXT: store i8* null, i8** [[TMP33]], align 4 -// CHECK3-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK3-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i16** -// CHECK3-NEXT: store i16* [[VLA]], i16** [[TMP35]], align 4 -// CHECK3-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK3-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i16** -// CHECK3-NEXT: store i16* [[VLA]], i16** [[TMP37]], align 4 -// CHECK3-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK3-NEXT: store i64 [[TMP9]], i64* [[TMP38]], align 4 -// CHECK3-NEXT: [[TMP39:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 -// CHECK3-NEXT: store i8* null, i8** [[TMP39]], align 4 -// CHECK3-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* 
[[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK3-NEXT: store i8* null, i8** [[TMP24]], align 4 +// CHECK3-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK3-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i32* +// CHECK3-NEXT: store i32 [[TMP1]], i32* [[TMP26]], align 4 +// CHECK3-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK3-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i32* +// CHECK3-NEXT: store i32 [[TMP1]], i32* [[TMP28]], align 4 +// CHECK3-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 +// CHECK3-NEXT: store i8* null, i8** [[TMP29]], align 4 +// CHECK3-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK3-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i16** +// CHECK3-NEXT: store i16* [[VLA]], i16** [[TMP31]], align 4 +// CHECK3-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK3-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i16** +// CHECK3-NEXT: store i16* [[VLA]], i16** [[TMP33]], align 4 +// CHECK3-NEXT: store i64 [[TMP9]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.13, i32 0, i32 4), align 4 +// CHECK3-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 +// CHECK3-NEXT: store i8* null, i8** [[TMP34]], align 4 +// CHECK3-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK3-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK3-NEXT: [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l218.region_id, i32 5, i8** [[TMP40]], i8** [[TMP41]], i64* [[TMP42]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK3-NEXT: [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0 -// CHECK3-NEXT: br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK3-NEXT: [[TMP37:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l218.region_id, i32 5, i8** [[TMP35]], i8** [[TMP36]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.13, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.14, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK3-NEXT: [[TMP38:%.*]] = icmp ne i32 [[TMP37]], 0 +// CHECK3-NEXT: br i1 [[TMP38]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK3: omp_offload.failed: // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l218(%struct.S1* [[THIS1]], i32 [[TMP5]], i32 2, i32 [[TMP1]], i16* [[VLA]]) #[[ATTR3]] // CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -4835,15 +4742,15 @@ // CHECK3-NEXT: call void 
@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l218(%struct.S1* [[THIS1]], i32 [[TMP5]], i32 2, i32 [[TMP1]], i16* [[VLA]]) #[[ATTR3]] // CHECK3-NEXT: br label [[OMP_IF_END]] // CHECK3: omp_if.end: -// CHECK3-NEXT: [[TMP45:%.*]] = mul nsw i32 1, [[TMP1]] -// CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP45]] +// CHECK3-NEXT: [[TMP39:%.*]] = mul nsw i32 1, [[TMP1]] +// CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP39]] // CHECK3-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1 -// CHECK3-NEXT: [[TMP46:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2 -// CHECK3-NEXT: [[CONV:%.*]] = sext i16 [[TMP46]] to i32 -// CHECK3-NEXT: [[TMP47:%.*]] = load i32, i32* [[B]], align 4 -// CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[TMP47]] -// CHECK3-NEXT: [[TMP48:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK3-NEXT: call void @llvm.stackrestore(i8* [[TMP48]]) +// CHECK3-NEXT: [[TMP40:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2 +// CHECK3-NEXT: [[CONV:%.*]] = sext i16 [[TMP40]] to i32 +// CHECK3-NEXT: [[TMP41:%.*]] = load i32, i32* [[B]], align 4 +// CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[TMP41]] +// CHECK3-NEXT: [[TMP42:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK3-NEXT: call void @llvm.stackrestore(i8* [[TMP42]]) // CHECK3-NEXT: ret i32 [[ADD3]] // // @@ -4946,7 +4853,7 @@ // CHECK3-NEXT: [[ADD6:%.*]] = add i32 [[TMP40]], 1 // CHECK3-NEXT: [[TMP41:%.*]] = zext i32 [[ADD6]] to i64 // CHECK3-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP41]]) -// CHECK3-NEXT: [[TMP42:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l200.region_id, i32 5, i8** [[TMP34]], i8** [[TMP35]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.14, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.15, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK3-NEXT: [[TMP42:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l200.region_id, i32 5, i8** [[TMP34]], i8** [[TMP35]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.16, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK3-NEXT: [[TMP43:%.*]] = icmp ne i32 [[TMP42]], 0 // CHECK3-NEXT: br i1 [[TMP43]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK3: omp_offload.failed: @@ -5016,7 +4923,7 @@ // CHECK3-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK3-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK3-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK3-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l183.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.17, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.18, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK3-NEXT: [[TMP22:%.*]] = call i32 
@__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l183.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.19, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.20, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK3-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0 // CHECK3-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK3: omp_offload.failed: @@ -5053,11 +4960,11 @@ // CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[B_ADDR]], align 4 // CHECK3-NEXT: store i32 [[TMP4]], i32* [[B_CASTED]], align 4 // CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[B_CASTED]], align 4 -// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], i16* [[TMP3]]) +// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*)* @.omp_outlined..12 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], i16* [[TMP3]]) // CHECK3-NEXT: ret void // // -// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..12 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i32 noundef [[B:%.*]], i32 noundef [[VLA:%.*]], i32 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR2]] { // CHECK3-NEXT: entry: // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -5179,11 +5086,11 @@ // CHECK3-NEXT: [[CONV3:%.*]] = bitcast i32* [[AAA_CASTED]] to i8* // CHECK3-NEXT: store i8 [[TMP7]], i8* [[CONV3]], align 1 // CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[AAA_CASTED]], align 4 -// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, [10 x i32]*)* @.omp_outlined..13 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], i32 [[TMP8]], [10 x i32]* [[TMP0]]) +// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, [10 x i32]*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], i32 [[TMP8]], [10 x i32]* [[TMP0]]) // CHECK3-NEXT: ret void // // -// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..13 +// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..15 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[N:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], i32 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] { // CHECK3-NEXT: entry: // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -5325,11 +5232,11 @@ // CHECK3-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16* // CHECK3-NEXT: store i16 [[TMP3]], i16* [[CONV1]], align 2 // CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4 -// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]]) +// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]]) // CHECK3-NEXT: ret void // // -// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..16 +// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..18 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] { // CHECK3-NEXT: entry: // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -5455,7 +5362,6 @@ // CHECK4-NEXT: [[DOTOFFLOAD_BASEPTRS23:%.*]] = alloca [10 x i8*], align 4 // CHECK4-NEXT: [[DOTOFFLOAD_PTRS24:%.*]] = alloca [10 x i8*], align 4 // CHECK4-NEXT: [[DOTOFFLOAD_MAPPERS25:%.*]] = alloca [10 x i8*], align 4 -// CHECK4-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [10 x i64], align 4 // CHECK4-NEXT: [[_TMP26:%.*]] = alloca i32, align 4 // CHECK4-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]]) // CHECK4-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 @@ -5634,107 +5540,88 @@ // CHECK4-NEXT: [[TMP100:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS24]], i32 0, i32 0 // CHECK4-NEXT: [[TMP101:%.*]] = bitcast i8** [[TMP100]] to i32* // CHECK4-NEXT: store i32 [[TMP89]], i32* [[TMP101]], align 4 -// CHECK4-NEXT: [[TMP102:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK4-NEXT: store i64 4, i64* [[TMP102]], align 4 -// CHECK4-NEXT: [[TMP103:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS25]], i32 0, i32 0 -// CHECK4-NEXT: store i8* null, i8** [[TMP103]], align 4 -// CHECK4-NEXT: [[TMP104:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 1 -// CHECK4-NEXT: [[TMP105:%.*]] = bitcast i8** [[TMP104]] to [10 x float]** -// CHECK4-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP105]], align 4 -// CHECK4-NEXT: [[TMP106:%.*]] = getelementptr inbounds [10 x 
i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS24]], i32 0, i32 1 -// CHECK4-NEXT: [[TMP107:%.*]] = bitcast i8** [[TMP106]] to [10 x float]** -// CHECK4-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP107]], align 4 -// CHECK4-NEXT: [[TMP108:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK4-NEXT: store i64 40, i64* [[TMP108]], align 4 -// CHECK4-NEXT: [[TMP109:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS25]], i32 0, i32 1 -// CHECK4-NEXT: store i8* null, i8** [[TMP109]], align 4 -// CHECK4-NEXT: [[TMP110:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 2 +// CHECK4-NEXT: [[TMP102:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS25]], i32 0, i32 0 +// CHECK4-NEXT: store i8* null, i8** [[TMP102]], align 4 +// CHECK4-NEXT: [[TMP103:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 1 +// CHECK4-NEXT: [[TMP104:%.*]] = bitcast i8** [[TMP103]] to [10 x float]** +// CHECK4-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP104]], align 4 +// CHECK4-NEXT: [[TMP105:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS24]], i32 0, i32 1 +// CHECK4-NEXT: [[TMP106:%.*]] = bitcast i8** [[TMP105]] to [10 x float]** +// CHECK4-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP106]], align 4 +// CHECK4-NEXT: [[TMP107:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS25]], i32 0, i32 1 +// CHECK4-NEXT: store i8* null, i8** [[TMP107]], align 4 +// CHECK4-NEXT: [[TMP108:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 2 +// CHECK4-NEXT: [[TMP109:%.*]] = bitcast i8** [[TMP108]] to i32* +// CHECK4-NEXT: store i32 [[TMP1]], i32* [[TMP109]], align 4 +// CHECK4-NEXT: [[TMP110:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS24]], i32 0, i32 2 // CHECK4-NEXT: [[TMP111:%.*]] = bitcast i8** [[TMP110]] to i32* // CHECK4-NEXT: store i32 [[TMP1]], i32* [[TMP111]], align 4 -// CHECK4-NEXT: [[TMP112:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS24]], i32 0, i32 2 -// CHECK4-NEXT: [[TMP113:%.*]] = bitcast i8** [[TMP112]] to i32* -// CHECK4-NEXT: store i32 [[TMP1]], i32* [[TMP113]], align 4 -// CHECK4-NEXT: [[TMP114:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK4-NEXT: store i64 4, i64* [[TMP114]], align 4 -// CHECK4-NEXT: [[TMP115:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS25]], i32 0, i32 2 -// CHECK4-NEXT: store i8* null, i8** [[TMP115]], align 4 -// CHECK4-NEXT: [[TMP116:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 3 -// CHECK4-NEXT: [[TMP117:%.*]] = bitcast i8** [[TMP116]] to float** -// CHECK4-NEXT: store float* [[VLA]], float** [[TMP117]], align 4 -// CHECK4-NEXT: [[TMP118:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS24]], i32 0, i32 3 -// CHECK4-NEXT: [[TMP119:%.*]] = bitcast i8** [[TMP118]] to float** -// CHECK4-NEXT: store float* [[VLA]], float** [[TMP119]], align 4 -// CHECK4-NEXT: [[TMP120:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK4-NEXT: store i64 [[TMP94]], i64* [[TMP120]], align 4 -// CHECK4-NEXT: [[TMP121:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS25]], i32 0, i32 3 -// CHECK4-NEXT: store i8* null, i8** [[TMP121]], align 4 -// CHECK4-NEXT: 
[[TMP122:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 4 -// CHECK4-NEXT: [[TMP123:%.*]] = bitcast i8** [[TMP122]] to [5 x [10 x double]]** -// CHECK4-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP123]], align 4 -// CHECK4-NEXT: [[TMP124:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS24]], i32 0, i32 4 -// CHECK4-NEXT: [[TMP125:%.*]] = bitcast i8** [[TMP124]] to [5 x [10 x double]]** -// CHECK4-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP125]], align 4 -// CHECK4-NEXT: [[TMP126:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK4-NEXT: store i64 400, i64* [[TMP126]], align 4 -// CHECK4-NEXT: [[TMP127:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS25]], i32 0, i32 4 +// CHECK4-NEXT: [[TMP112:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS25]], i32 0, i32 2 +// CHECK4-NEXT: store i8* null, i8** [[TMP112]], align 4 +// CHECK4-NEXT: [[TMP113:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 3 +// CHECK4-NEXT: [[TMP114:%.*]] = bitcast i8** [[TMP113]] to float** +// CHECK4-NEXT: store float* [[VLA]], float** [[TMP114]], align 4 +// CHECK4-NEXT: [[TMP115:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS24]], i32 0, i32 3 +// CHECK4-NEXT: [[TMP116:%.*]] = bitcast i8** [[TMP115]] to float** +// CHECK4-NEXT: store float* [[VLA]], float** [[TMP116]], align 4 +// CHECK4-NEXT: store i64 [[TMP94]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_sizes.10, i32 0, i32 3), align 4 +// CHECK4-NEXT: [[TMP117:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS25]], i32 0, i32 3 +// CHECK4-NEXT: store i8* null, i8** [[TMP117]], align 4 +// CHECK4-NEXT: [[TMP118:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 4 +// CHECK4-NEXT: [[TMP119:%.*]] = bitcast i8** [[TMP118]] to [5 x [10 x double]]** +// CHECK4-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP119]], align 4 +// CHECK4-NEXT: [[TMP120:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS24]], i32 0, i32 4 +// CHECK4-NEXT: [[TMP121:%.*]] = bitcast i8** [[TMP120]] to [5 x [10 x double]]** +// CHECK4-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP121]], align 4 +// CHECK4-NEXT: [[TMP122:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS25]], i32 0, i32 4 +// CHECK4-NEXT: store i8* null, i8** [[TMP122]], align 4 +// CHECK4-NEXT: [[TMP123:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 5 +// CHECK4-NEXT: [[TMP124:%.*]] = bitcast i8** [[TMP123]] to i32* +// CHECK4-NEXT: store i32 5, i32* [[TMP124]], align 4 +// CHECK4-NEXT: [[TMP125:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS24]], i32 0, i32 5 +// CHECK4-NEXT: [[TMP126:%.*]] = bitcast i8** [[TMP125]] to i32* +// CHECK4-NEXT: store i32 5, i32* [[TMP126]], align 4 +// CHECK4-NEXT: [[TMP127:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS25]], i32 0, i32 5 // CHECK4-NEXT: store i8* null, i8** [[TMP127]], align 4 -// CHECK4-NEXT: [[TMP128:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 5 +// CHECK4-NEXT: [[TMP128:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 6 // CHECK4-NEXT: 
[[TMP129:%.*]] = bitcast i8** [[TMP128]] to i32* -// CHECK4-NEXT: store i32 5, i32* [[TMP129]], align 4 -// CHECK4-NEXT: [[TMP130:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS24]], i32 0, i32 5 +// CHECK4-NEXT: store i32 [[TMP3]], i32* [[TMP129]], align 4 +// CHECK4-NEXT: [[TMP130:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS24]], i32 0, i32 6 // CHECK4-NEXT: [[TMP131:%.*]] = bitcast i8** [[TMP130]] to i32* -// CHECK4-NEXT: store i32 5, i32* [[TMP131]], align 4 -// CHECK4-NEXT: [[TMP132:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5 -// CHECK4-NEXT: store i64 4, i64* [[TMP132]], align 4 -// CHECK4-NEXT: [[TMP133:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS25]], i32 0, i32 5 -// CHECK4-NEXT: store i8* null, i8** [[TMP133]], align 4 -// CHECK4-NEXT: [[TMP134:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 6 -// CHECK4-NEXT: [[TMP135:%.*]] = bitcast i8** [[TMP134]] to i32* -// CHECK4-NEXT: store i32 [[TMP3]], i32* [[TMP135]], align 4 -// CHECK4-NEXT: [[TMP136:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS24]], i32 0, i32 6 -// CHECK4-NEXT: [[TMP137:%.*]] = bitcast i8** [[TMP136]] to i32* -// CHECK4-NEXT: store i32 [[TMP3]], i32* [[TMP137]], align 4 -// CHECK4-NEXT: [[TMP138:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6 -// CHECK4-NEXT: store i64 4, i64* [[TMP138]], align 4 -// CHECK4-NEXT: [[TMP139:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS25]], i32 0, i32 6 -// CHECK4-NEXT: store i8* null, i8** [[TMP139]], align 4 -// CHECK4-NEXT: [[TMP140:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 7 -// CHECK4-NEXT: [[TMP141:%.*]] = bitcast i8** [[TMP140]] to double** -// CHECK4-NEXT: store double* [[VLA1]], double** [[TMP141]], align 4 -// CHECK4-NEXT: [[TMP142:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS24]], i32 0, i32 7 -// CHECK4-NEXT: [[TMP143:%.*]] = bitcast i8** [[TMP142]] to double** -// CHECK4-NEXT: store double* [[VLA1]], double** [[TMP143]], align 4 -// CHECK4-NEXT: [[TMP144:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7 -// CHECK4-NEXT: store i64 [[TMP97]], i64* [[TMP144]], align 4 -// CHECK4-NEXT: [[TMP145:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS25]], i32 0, i32 7 -// CHECK4-NEXT: store i8* null, i8** [[TMP145]], align 4 -// CHECK4-NEXT: [[TMP146:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 8 -// CHECK4-NEXT: [[TMP147:%.*]] = bitcast i8** [[TMP146]] to %struct.TT** -// CHECK4-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP147]], align 4 -// CHECK4-NEXT: [[TMP148:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS24]], i32 0, i32 8 -// CHECK4-NEXT: [[TMP149:%.*]] = bitcast i8** [[TMP148]] to %struct.TT** -// CHECK4-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP149]], align 4 -// CHECK4-NEXT: [[TMP150:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 8 -// CHECK4-NEXT: store i64 12, i64* [[TMP150]], align 4 -// CHECK4-NEXT: [[TMP151:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS25]], i32 0, i32 8 -// CHECK4-NEXT: store i8* null, i8** [[TMP151]], align 4 -// CHECK4-NEXT: [[TMP152:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* 
[[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 9 -// CHECK4-NEXT: [[TMP153:%.*]] = bitcast i8** [[TMP152]] to i32* -// CHECK4-NEXT: store i32 [[TMP91]], i32* [[TMP153]], align 4 -// CHECK4-NEXT: [[TMP154:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS24]], i32 0, i32 9 -// CHECK4-NEXT: [[TMP155:%.*]] = bitcast i8** [[TMP154]] to i32* -// CHECK4-NEXT: store i32 [[TMP91]], i32* [[TMP155]], align 4 -// CHECK4-NEXT: [[TMP156:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 9 -// CHECK4-NEXT: store i64 4, i64* [[TMP156]], align 4 -// CHECK4-NEXT: [[TMP157:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS25]], i32 0, i32 9 -// CHECK4-NEXT: store i8* null, i8** [[TMP157]], align 4 -// CHECK4-NEXT: [[TMP158:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 0 -// CHECK4-NEXT: [[TMP159:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS24]], i32 0, i32 0 -// CHECK4-NEXT: [[TMP160:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 +// CHECK4-NEXT: store i32 [[TMP3]], i32* [[TMP131]], align 4 +// CHECK4-NEXT: [[TMP132:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS25]], i32 0, i32 6 +// CHECK4-NEXT: store i8* null, i8** [[TMP132]], align 4 +// CHECK4-NEXT: [[TMP133:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 7 +// CHECK4-NEXT: [[TMP134:%.*]] = bitcast i8** [[TMP133]] to double** +// CHECK4-NEXT: store double* [[VLA1]], double** [[TMP134]], align 4 +// CHECK4-NEXT: [[TMP135:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS24]], i32 0, i32 7 +// CHECK4-NEXT: [[TMP136:%.*]] = bitcast i8** [[TMP135]] to double** +// CHECK4-NEXT: store double* [[VLA1]], double** [[TMP136]], align 4 +// CHECK4-NEXT: store i64 [[TMP97]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_sizes.10, i32 0, i32 7), align 4 +// CHECK4-NEXT: [[TMP137:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS25]], i32 0, i32 7 +// CHECK4-NEXT: store i8* null, i8** [[TMP137]], align 4 +// CHECK4-NEXT: [[TMP138:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 8 +// CHECK4-NEXT: [[TMP139:%.*]] = bitcast i8** [[TMP138]] to %struct.TT** +// CHECK4-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP139]], align 4 +// CHECK4-NEXT: [[TMP140:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS24]], i32 0, i32 8 +// CHECK4-NEXT: [[TMP141:%.*]] = bitcast i8** [[TMP140]] to %struct.TT** +// CHECK4-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP141]], align 4 +// CHECK4-NEXT: [[TMP142:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS25]], i32 0, i32 8 +// CHECK4-NEXT: store i8* null, i8** [[TMP142]], align 4 +// CHECK4-NEXT: [[TMP143:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 9 +// CHECK4-NEXT: [[TMP144:%.*]] = bitcast i8** [[TMP143]] to i32* +// CHECK4-NEXT: store i32 [[TMP91]], i32* [[TMP144]], align 4 +// CHECK4-NEXT: [[TMP145:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS24]], i32 0, i32 9 +// CHECK4-NEXT: [[TMP146:%.*]] = bitcast i8** [[TMP145]] to i32* +// CHECK4-NEXT: store i32 [[TMP91]], i32* [[TMP146]], align 4 +// CHECK4-NEXT: [[TMP147:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS25]], i32 0, i32 9 +// CHECK4-NEXT: store i8* null, 
i8** [[TMP147]], align 4 +// CHECK4-NEXT: [[TMP148:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 0 +// CHECK4-NEXT: [[TMP149:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS24]], i32 0, i32 0 // CHECK4-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK4-NEXT: [[TMP161:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l145.region_id, i32 10, i8** [[TMP158]], i8** [[TMP159]], i64* [[TMP160]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK4-NEXT: [[TMP162:%.*]] = icmp ne i32 [[TMP161]], 0 -// CHECK4-NEXT: br i1 [[TMP162]], label [[OMP_OFFLOAD_FAILED27:%.*]], label [[OMP_OFFLOAD_CONT28:%.*]] +// CHECK4-NEXT: [[TMP150:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l145.region_id, i32 10, i8** [[TMP148]], i8** [[TMP149]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_sizes.10, i32 0, i32 0), i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_maptypes.11, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK4-NEXT: [[TMP151:%.*]] = icmp ne i32 [[TMP150]], 0 +// CHECK4-NEXT: br i1 [[TMP151]], label [[OMP_OFFLOAD_FAILED27:%.*]], label [[OMP_OFFLOAD_CONT28:%.*]] // CHECK4: omp_offload.failed27: // CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l145(i32 [[TMP89]], [10 x float]* [[B]], i32 [[TMP1]], float* [[VLA]], [5 x [10 x double]]* [[C]], i32 5, i32 [[TMP3]], double* [[VLA1]], %struct.TT* [[D]], i32 [[TMP91]]) #[[ATTR3]] // CHECK4-NEXT: br label [[OMP_OFFLOAD_CONT28]] @@ -5744,10 +5631,10 @@ // CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l145(i32 [[TMP89]], [10 x float]* [[B]], i32 [[TMP1]], float* [[VLA]], [5 x [10 x double]]* [[C]], i32 5, i32 [[TMP3]], double* [[VLA1]], %struct.TT* [[D]], i32 [[TMP91]]) #[[ATTR3]] // CHECK4-NEXT: br label [[OMP_IF_END30]] // CHECK4: omp_if.end30: -// CHECK4-NEXT: [[TMP163:%.*]] = load i32, i32* [[A]], align 4 -// CHECK4-NEXT: [[TMP164:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK4-NEXT: call void @llvm.stackrestore(i8* [[TMP164]]) -// CHECK4-NEXT: ret i32 [[TMP163]] +// CHECK4-NEXT: [[TMP152:%.*]] = load i32, i32* [[A]], align 4 +// CHECK4-NEXT: [[TMP153:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK4-NEXT: call void @llvm.stackrestore(i8* [[TMP153]]) +// CHECK4-NEXT: ret i32 [[TMP152]] // // // CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l103 @@ -6426,7 +6313,6 @@ // CHECK4-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 4 // CHECK4-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 4 // CHECK4-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 4 -// CHECK4-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 4 // CHECK4-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK4-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 // CHECK4-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 @@ -6457,57 +6343,47 @@ // CHECK4-NEXT: [[TMP12:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK4-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to double** // CHECK4-NEXT: store double* [[A]], double** [[TMP13]], align 4 -// CHECK4-NEXT: [[TMP14:%.*]] = 
getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK4-NEXT: store i64 8, i64* [[TMP14]], align 4 -// CHECK4-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK4-NEXT: store i8* null, i8** [[TMP15]], align 4 -// CHECK4-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK4-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32* -// CHECK4-NEXT: store i32 [[TMP5]], i32* [[TMP17]], align 4 -// CHECK4-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK4-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32* -// CHECK4-NEXT: store i32 [[TMP5]], i32* [[TMP19]], align 4 -// CHECK4-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK4-NEXT: store i64 4, i64* [[TMP20]], align 4 -// CHECK4-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK4-NEXT: store i8* null, i8** [[TMP21]], align 4 -// CHECK4-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK4-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK4-NEXT: store i8* null, i8** [[TMP14]], align 4 +// CHECK4-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK4-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i32* +// CHECK4-NEXT: store i32 [[TMP5]], i32* [[TMP16]], align 4 +// CHECK4-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK4-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32* +// CHECK4-NEXT: store i32 [[TMP5]], i32* [[TMP18]], align 4 +// CHECK4-NEXT: [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK4-NEXT: store i8* null, i8** [[TMP19]], align 4 +// CHECK4-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK4-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32* +// CHECK4-NEXT: store i32 2, i32* [[TMP21]], align 4 +// CHECK4-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK4-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i32* // CHECK4-NEXT: store i32 2, i32* [[TMP23]], align 4 -// CHECK4-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK4-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i32* -// CHECK4-NEXT: store i32 2, i32* [[TMP25]], align 4 -// CHECK4-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK4-NEXT: store i64 4, i64* [[TMP26]], align 4 -// CHECK4-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK4-NEXT: store i8* null, i8** [[TMP27]], align 4 -// CHECK4-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK4-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i32* -// CHECK4-NEXT: store i32 [[TMP1]], i32* [[TMP29]], align 4 -// CHECK4-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK4-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i32* -// CHECK4-NEXT: store i32 [[TMP1]], i32* 
[[TMP31]], align 4 -// CHECK4-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK4-NEXT: store i64 4, i64* [[TMP32]], align 4 -// CHECK4-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 -// CHECK4-NEXT: store i8* null, i8** [[TMP33]], align 4 -// CHECK4-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK4-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i16** -// CHECK4-NEXT: store i16* [[VLA]], i16** [[TMP35]], align 4 -// CHECK4-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK4-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i16** -// CHECK4-NEXT: store i16* [[VLA]], i16** [[TMP37]], align 4 -// CHECK4-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK4-NEXT: store i64 [[TMP9]], i64* [[TMP38]], align 4 -// CHECK4-NEXT: [[TMP39:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 -// CHECK4-NEXT: store i8* null, i8** [[TMP39]], align 4 -// CHECK4-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK4-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK4-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 +// CHECK4-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK4-NEXT: store i8* null, i8** [[TMP24]], align 4 +// CHECK4-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK4-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i32* +// CHECK4-NEXT: store i32 [[TMP1]], i32* [[TMP26]], align 4 +// CHECK4-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK4-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i32* +// CHECK4-NEXT: store i32 [[TMP1]], i32* [[TMP28]], align 4 +// CHECK4-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 +// CHECK4-NEXT: store i8* null, i8** [[TMP29]], align 4 +// CHECK4-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK4-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i16** +// CHECK4-NEXT: store i16* [[VLA]], i16** [[TMP31]], align 4 +// CHECK4-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK4-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i16** +// CHECK4-NEXT: store i16* [[VLA]], i16** [[TMP33]], align 4 +// CHECK4-NEXT: store i64 [[TMP9]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.13, i32 0, i32 4), align 4 +// CHECK4-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 +// CHECK4-NEXT: store i8* null, i8** [[TMP34]], align 4 +// CHECK4-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK4-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK4-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK4-NEXT: [[TMP43:%.*]] = call i32 
@__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l218.region_id, i32 5, i8** [[TMP40]], i8** [[TMP41]], i64* [[TMP42]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK4-NEXT: [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0 -// CHECK4-NEXT: br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK4-NEXT: [[TMP37:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l218.region_id, i32 5, i8** [[TMP35]], i8** [[TMP36]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.13, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.14, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK4-NEXT: [[TMP38:%.*]] = icmp ne i32 [[TMP37]], 0 +// CHECK4-NEXT: br i1 [[TMP38]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK4: omp_offload.failed: // CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l218(%struct.S1* [[THIS1]], i32 [[TMP5]], i32 2, i32 [[TMP1]], i16* [[VLA]]) #[[ATTR3]] // CHECK4-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -6517,15 +6393,15 @@ // CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l218(%struct.S1* [[THIS1]], i32 [[TMP5]], i32 2, i32 [[TMP1]], i16* [[VLA]]) #[[ATTR3]] // CHECK4-NEXT: br label [[OMP_IF_END]] // CHECK4: omp_if.end: -// CHECK4-NEXT: [[TMP45:%.*]] = mul nsw i32 1, [[TMP1]] -// CHECK4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP45]] +// CHECK4-NEXT: [[TMP39:%.*]] = mul nsw i32 1, [[TMP1]] +// CHECK4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP39]] // CHECK4-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1 -// CHECK4-NEXT: [[TMP46:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2 -// CHECK4-NEXT: [[CONV:%.*]] = sext i16 [[TMP46]] to i32 -// CHECK4-NEXT: [[TMP47:%.*]] = load i32, i32* [[B]], align 4 -// CHECK4-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[TMP47]] -// CHECK4-NEXT: [[TMP48:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK4-NEXT: call void @llvm.stackrestore(i8* [[TMP48]]) +// CHECK4-NEXT: [[TMP40:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2 +// CHECK4-NEXT: [[CONV:%.*]] = sext i16 [[TMP40]] to i32 +// CHECK4-NEXT: [[TMP41:%.*]] = load i32, i32* [[B]], align 4 +// CHECK4-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[TMP41]] +// CHECK4-NEXT: [[TMP42:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK4-NEXT: call void @llvm.stackrestore(i8* [[TMP42]]) // CHECK4-NEXT: ret i32 [[ADD3]] // // @@ -6628,7 +6504,7 @@ // CHECK4-NEXT: [[ADD6:%.*]] = add i32 [[TMP40]], 1 // CHECK4-NEXT: [[TMP41:%.*]] = zext i32 [[ADD6]] to i64 // CHECK4-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP41]]) -// CHECK4-NEXT: [[TMP42:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l200.region_id, i32 5, i8** [[TMP34]], i8** [[TMP35]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.14, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.15, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK4-NEXT: [[TMP42:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, 
i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l200.region_id, i32 5, i8** [[TMP34]], i8** [[TMP35]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.16, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK4-NEXT: [[TMP43:%.*]] = icmp ne i32 [[TMP42]], 0 // CHECK4-NEXT: br i1 [[TMP43]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK4: omp_offload.failed: @@ -6698,7 +6574,7 @@ // CHECK4-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK4-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK4-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK4-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l183.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.17, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.18, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK4-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l183.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.19, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.20, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK4-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0 // CHECK4-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK4: omp_offload.failed: @@ -6735,11 +6611,11 @@ // CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[B_ADDR]], align 4 // CHECK4-NEXT: store i32 [[TMP4]], i32* [[B_CASTED]], align 4 // CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[B_CASTED]], align 4 -// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], i16* [[TMP3]]) +// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*)* @.omp_outlined..12 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], i16* [[TMP3]]) // CHECK4-NEXT: ret void // // -// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..12 // CHECK4-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i32 noundef [[B:%.*]], i32 noundef [[VLA:%.*]], i32 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR2]] { // CHECK4-NEXT: entry: // CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -6861,11 +6737,11 @@ // CHECK4-NEXT: [[CONV3:%.*]] = bitcast i32* [[AAA_CASTED]] to i8* // CHECK4-NEXT: store i8 [[TMP7]], i8* [[CONV3]], align 1 // CHECK4-NEXT: [[TMP8:%.*]] = load i32, i32* [[AAA_CASTED]], align 4 -// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, [10 x i32]*)* @.omp_outlined..13 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], i32 [[TMP8]], [10 x i32]* [[TMP0]]) +// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, [10 x i32]*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], i32 [[TMP8]], [10 x i32]* [[TMP0]]) // CHECK4-NEXT: ret void // // -// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..13 +// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..15 // CHECK4-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[N:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], i32 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] { // CHECK4-NEXT: entry: // CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -7007,11 +6883,11 @@ // CHECK4-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16* // CHECK4-NEXT: store i16 [[TMP3]], i16* [[CONV1]], align 2 // CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4 -// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]]) +// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]]) // CHECK4-NEXT: ret void // // -// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..16 +// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..18 // CHECK4-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] { // CHECK4-NEXT: entry: // CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -10483,7 +10359,6 @@ // CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS29:%.*]] = alloca [10 x i8*], align 8 // CHECK17-NEXT: [[DOTOFFLOAD_PTRS30:%.*]] = alloca [10 x i8*], align 8 // CHECK17-NEXT: [[DOTOFFLOAD_MAPPERS31:%.*]] = alloca [10 x i8*], align 8 -// CHECK17-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [10 x i64], align 8 // CHECK17-NEXT: [[_TMP32:%.*]] = alloca i32, align 4 // CHECK17-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]]) // CHECK17-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 @@ -10668,107 +10543,88 @@ // CHECK17-NEXT: [[TMP100:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 0 // CHECK17-NEXT: [[TMP101:%.*]] = bitcast i8** [[TMP100]] to i64* // CHECK17-NEXT: store i64 [[TMP91]], i64* [[TMP101]], align 8 -// CHECK17-NEXT: [[TMP102:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK17-NEXT: store i64 4, i64* [[TMP102]], align 8 -// CHECK17-NEXT: [[TMP103:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 0 -// CHECK17-NEXT: store i8* null, i8** [[TMP103]], align 8 -// CHECK17-NEXT: [[TMP104:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 1 -// CHECK17-NEXT: [[TMP105:%.*]] = bitcast i8** [[TMP104]] to [10 x float]** -// CHECK17-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP105]], align 8 -// CHECK17-NEXT: [[TMP106:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 1 -// CHECK17-NEXT: [[TMP107:%.*]] = bitcast i8** [[TMP106]] to [10 x float]** -// CHECK17-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP107]], align 8 -// CHECK17-NEXT: [[TMP108:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK17-NEXT: store i64 40, i64* [[TMP108]], align 8 -// CHECK17-NEXT: [[TMP109:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 1 -// CHECK17-NEXT: store i8* null, i8** [[TMP109]], align 8 -// CHECK17-NEXT: [[TMP110:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 2 +// CHECK17-NEXT: [[TMP102:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 0 +// CHECK17-NEXT: store i8* null, i8** [[TMP102]], align 8 +// CHECK17-NEXT: [[TMP103:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 1 +// CHECK17-NEXT: [[TMP104:%.*]] = bitcast i8** [[TMP103]] to [10 x float]** +// CHECK17-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP104]], align 8 +// CHECK17-NEXT: [[TMP105:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 1 +// CHECK17-NEXT: [[TMP106:%.*]] = bitcast i8** [[TMP105]] to [10 x float]** +// CHECK17-NEXT: 
store [10 x float]* [[B]], [10 x float]** [[TMP106]], align 8 +// CHECK17-NEXT: [[TMP107:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 1 +// CHECK17-NEXT: store i8* null, i8** [[TMP107]], align 8 +// CHECK17-NEXT: [[TMP108:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 2 +// CHECK17-NEXT: [[TMP109:%.*]] = bitcast i8** [[TMP108]] to i64* +// CHECK17-NEXT: store i64 [[TMP2]], i64* [[TMP109]], align 8 +// CHECK17-NEXT: [[TMP110:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 2 // CHECK17-NEXT: [[TMP111:%.*]] = bitcast i8** [[TMP110]] to i64* // CHECK17-NEXT: store i64 [[TMP2]], i64* [[TMP111]], align 8 -// CHECK17-NEXT: [[TMP112:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 2 -// CHECK17-NEXT: [[TMP113:%.*]] = bitcast i8** [[TMP112]] to i64* -// CHECK17-NEXT: store i64 [[TMP2]], i64* [[TMP113]], align 8 -// CHECK17-NEXT: [[TMP114:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK17-NEXT: store i64 8, i64* [[TMP114]], align 8 -// CHECK17-NEXT: [[TMP115:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 2 -// CHECK17-NEXT: store i8* null, i8** [[TMP115]], align 8 -// CHECK17-NEXT: [[TMP116:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 3 -// CHECK17-NEXT: [[TMP117:%.*]] = bitcast i8** [[TMP116]] to float** -// CHECK17-NEXT: store float* [[VLA]], float** [[TMP117]], align 8 -// CHECK17-NEXT: [[TMP118:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 3 -// CHECK17-NEXT: [[TMP119:%.*]] = bitcast i8** [[TMP118]] to float** -// CHECK17-NEXT: store float* [[VLA]], float** [[TMP119]], align 8 -// CHECK17-NEXT: [[TMP120:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK17-NEXT: store i64 [[TMP95]], i64* [[TMP120]], align 8 -// CHECK17-NEXT: [[TMP121:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 3 -// CHECK17-NEXT: store i8* null, i8** [[TMP121]], align 8 -// CHECK17-NEXT: [[TMP122:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 4 -// CHECK17-NEXT: [[TMP123:%.*]] = bitcast i8** [[TMP122]] to [5 x [10 x double]]** -// CHECK17-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP123]], align 8 -// CHECK17-NEXT: [[TMP124:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 4 -// CHECK17-NEXT: [[TMP125:%.*]] = bitcast i8** [[TMP124]] to [5 x [10 x double]]** -// CHECK17-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP125]], align 8 -// CHECK17-NEXT: [[TMP126:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK17-NEXT: store i64 400, i64* [[TMP126]], align 8 -// CHECK17-NEXT: [[TMP127:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 4 +// CHECK17-NEXT: [[TMP112:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 2 +// CHECK17-NEXT: store i8* null, i8** [[TMP112]], align 8 +// CHECK17-NEXT: [[TMP113:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 3 +// CHECK17-NEXT: [[TMP114:%.*]] = bitcast i8** [[TMP113]] to float** +// CHECK17-NEXT: store float* [[VLA]], float** [[TMP114]], align 8 +// 
CHECK17-NEXT: [[TMP115:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 3 +// CHECK17-NEXT: [[TMP116:%.*]] = bitcast i8** [[TMP115]] to float** +// CHECK17-NEXT: store float* [[VLA]], float** [[TMP116]], align 8 +// CHECK17-NEXT: store i64 [[TMP95]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_sizes.10, i32 0, i32 3), align 8 +// CHECK17-NEXT: [[TMP117:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 3 +// CHECK17-NEXT: store i8* null, i8** [[TMP117]], align 8 +// CHECK17-NEXT: [[TMP118:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 4 +// CHECK17-NEXT: [[TMP119:%.*]] = bitcast i8** [[TMP118]] to [5 x [10 x double]]** +// CHECK17-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP119]], align 8 +// CHECK17-NEXT: [[TMP120:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 4 +// CHECK17-NEXT: [[TMP121:%.*]] = bitcast i8** [[TMP120]] to [5 x [10 x double]]** +// CHECK17-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP121]], align 8 +// CHECK17-NEXT: [[TMP122:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 4 +// CHECK17-NEXT: store i8* null, i8** [[TMP122]], align 8 +// CHECK17-NEXT: [[TMP123:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 5 +// CHECK17-NEXT: [[TMP124:%.*]] = bitcast i8** [[TMP123]] to i64* +// CHECK17-NEXT: store i64 5, i64* [[TMP124]], align 8 +// CHECK17-NEXT: [[TMP125:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 5 +// CHECK17-NEXT: [[TMP126:%.*]] = bitcast i8** [[TMP125]] to i64* +// CHECK17-NEXT: store i64 5, i64* [[TMP126]], align 8 +// CHECK17-NEXT: [[TMP127:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 5 // CHECK17-NEXT: store i8* null, i8** [[TMP127]], align 8 -// CHECK17-NEXT: [[TMP128:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 5 +// CHECK17-NEXT: [[TMP128:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 6 // CHECK17-NEXT: [[TMP129:%.*]] = bitcast i8** [[TMP128]] to i64* -// CHECK17-NEXT: store i64 5, i64* [[TMP129]], align 8 -// CHECK17-NEXT: [[TMP130:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 5 +// CHECK17-NEXT: store i64 [[TMP5]], i64* [[TMP129]], align 8 +// CHECK17-NEXT: [[TMP130:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 6 // CHECK17-NEXT: [[TMP131:%.*]] = bitcast i8** [[TMP130]] to i64* -// CHECK17-NEXT: store i64 5, i64* [[TMP131]], align 8 -// CHECK17-NEXT: [[TMP132:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5 -// CHECK17-NEXT: store i64 8, i64* [[TMP132]], align 8 -// CHECK17-NEXT: [[TMP133:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 5 -// CHECK17-NEXT: store i8* null, i8** [[TMP133]], align 8 -// CHECK17-NEXT: [[TMP134:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 6 -// CHECK17-NEXT: [[TMP135:%.*]] = bitcast i8** [[TMP134]] to i64* -// CHECK17-NEXT: store i64 [[TMP5]], i64* [[TMP135]], align 8 -// CHECK17-NEXT: [[TMP136:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 6 -// CHECK17-NEXT: [[TMP137:%.*]] = 
bitcast i8** [[TMP136]] to i64* -// CHECK17-NEXT: store i64 [[TMP5]], i64* [[TMP137]], align 8 -// CHECK17-NEXT: [[TMP138:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6 -// CHECK17-NEXT: store i64 8, i64* [[TMP138]], align 8 -// CHECK17-NEXT: [[TMP139:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 6 -// CHECK17-NEXT: store i8* null, i8** [[TMP139]], align 8 -// CHECK17-NEXT: [[TMP140:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 7 -// CHECK17-NEXT: [[TMP141:%.*]] = bitcast i8** [[TMP140]] to double** -// CHECK17-NEXT: store double* [[VLA1]], double** [[TMP141]], align 8 -// CHECK17-NEXT: [[TMP142:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 7 -// CHECK17-NEXT: [[TMP143:%.*]] = bitcast i8** [[TMP142]] to double** -// CHECK17-NEXT: store double* [[VLA1]], double** [[TMP143]], align 8 -// CHECK17-NEXT: [[TMP144:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7 -// CHECK17-NEXT: store i64 [[TMP97]], i64* [[TMP144]], align 8 -// CHECK17-NEXT: [[TMP145:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 7 -// CHECK17-NEXT: store i8* null, i8** [[TMP145]], align 8 -// CHECK17-NEXT: [[TMP146:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 8 -// CHECK17-NEXT: [[TMP147:%.*]] = bitcast i8** [[TMP146]] to %struct.TT** -// CHECK17-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP147]], align 8 -// CHECK17-NEXT: [[TMP148:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 8 -// CHECK17-NEXT: [[TMP149:%.*]] = bitcast i8** [[TMP148]] to %struct.TT** -// CHECK17-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP149]], align 8 -// CHECK17-NEXT: [[TMP150:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 8 -// CHECK17-NEXT: store i64 16, i64* [[TMP150]], align 8 -// CHECK17-NEXT: [[TMP151:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 8 -// CHECK17-NEXT: store i8* null, i8** [[TMP151]], align 8 -// CHECK17-NEXT: [[TMP152:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 9 -// CHECK17-NEXT: [[TMP153:%.*]] = bitcast i8** [[TMP152]] to i64* -// CHECK17-NEXT: store i64 [[TMP93]], i64* [[TMP153]], align 8 -// CHECK17-NEXT: [[TMP154:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 9 -// CHECK17-NEXT: [[TMP155:%.*]] = bitcast i8** [[TMP154]] to i64* -// CHECK17-NEXT: store i64 [[TMP93]], i64* [[TMP155]], align 8 -// CHECK17-NEXT: [[TMP156:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 9 -// CHECK17-NEXT: store i64 4, i64* [[TMP156]], align 8 -// CHECK17-NEXT: [[TMP157:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 9 -// CHECK17-NEXT: store i8* null, i8** [[TMP157]], align 8 -// CHECK17-NEXT: [[TMP158:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP159:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP160:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 +// CHECK17-NEXT: store i64 [[TMP5]], i64* [[TMP131]], align 8 +// CHECK17-NEXT: [[TMP132:%.*]] = getelementptr 
inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 6 +// CHECK17-NEXT: store i8* null, i8** [[TMP132]], align 8 +// CHECK17-NEXT: [[TMP133:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 7 +// CHECK17-NEXT: [[TMP134:%.*]] = bitcast i8** [[TMP133]] to double** +// CHECK17-NEXT: store double* [[VLA1]], double** [[TMP134]], align 8 +// CHECK17-NEXT: [[TMP135:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 7 +// CHECK17-NEXT: [[TMP136:%.*]] = bitcast i8** [[TMP135]] to double** +// CHECK17-NEXT: store double* [[VLA1]], double** [[TMP136]], align 8 +// CHECK17-NEXT: store i64 [[TMP97]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_sizes.10, i32 0, i32 7), align 8 +// CHECK17-NEXT: [[TMP137:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 7 +// CHECK17-NEXT: store i8* null, i8** [[TMP137]], align 8 +// CHECK17-NEXT: [[TMP138:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 8 +// CHECK17-NEXT: [[TMP139:%.*]] = bitcast i8** [[TMP138]] to %struct.TT** +// CHECK17-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP139]], align 8 +// CHECK17-NEXT: [[TMP140:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 8 +// CHECK17-NEXT: [[TMP141:%.*]] = bitcast i8** [[TMP140]] to %struct.TT** +// CHECK17-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP141]], align 8 +// CHECK17-NEXT: [[TMP142:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 8 +// CHECK17-NEXT: store i8* null, i8** [[TMP142]], align 8 +// CHECK17-NEXT: [[TMP143:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 9 +// CHECK17-NEXT: [[TMP144:%.*]] = bitcast i8** [[TMP143]] to i64* +// CHECK17-NEXT: store i64 [[TMP93]], i64* [[TMP144]], align 8 +// CHECK17-NEXT: [[TMP145:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 9 +// CHECK17-NEXT: [[TMP146:%.*]] = bitcast i8** [[TMP145]] to i64* +// CHECK17-NEXT: store i64 [[TMP93]], i64* [[TMP146]], align 8 +// CHECK17-NEXT: [[TMP147:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 9 +// CHECK17-NEXT: store i8* null, i8** [[TMP147]], align 8 +// CHECK17-NEXT: [[TMP148:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP149:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 0 // CHECK17-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK17-NEXT: [[TMP161:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l145.region_id, i32 10, i8** [[TMP158]], i8** [[TMP159]], i64* [[TMP160]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK17-NEXT: [[TMP162:%.*]] = icmp ne i32 [[TMP161]], 0 -// CHECK17-NEXT: br i1 [[TMP162]], label [[OMP_OFFLOAD_FAILED33:%.*]], label [[OMP_OFFLOAD_CONT34:%.*]] +// CHECK17-NEXT: [[TMP150:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l145.region_id, i32 10, i8** [[TMP148]], i8** [[TMP149]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_sizes.10, i32 0, 
i32 0), i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_maptypes.11, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK17-NEXT: [[TMP151:%.*]] = icmp ne i32 [[TMP150]], 0 +// CHECK17-NEXT: br i1 [[TMP151]], label [[OMP_OFFLOAD_FAILED33:%.*]], label [[OMP_OFFLOAD_CONT34:%.*]] // CHECK17: omp_offload.failed33: // CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l145(i64 [[TMP91]], [10 x float]* [[B]], i64 [[TMP2]], float* [[VLA]], [5 x [10 x double]]* [[C]], i64 5, i64 [[TMP5]], double* [[VLA1]], %struct.TT* [[D]], i64 [[TMP93]]) #[[ATTR3]] // CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT34]] @@ -10778,10 +10634,10 @@ // CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l145(i64 [[TMP91]], [10 x float]* [[B]], i64 [[TMP2]], float* [[VLA]], [5 x [10 x double]]* [[C]], i64 5, i64 [[TMP5]], double* [[VLA1]], %struct.TT* [[D]], i64 [[TMP93]]) #[[ATTR3]] // CHECK17-NEXT: br label [[OMP_IF_END36]] // CHECK17: omp_if.end36: -// CHECK17-NEXT: [[TMP163:%.*]] = load i32, i32* [[A]], align 4 -// CHECK17-NEXT: [[TMP164:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK17-NEXT: call void @llvm.stackrestore(i8* [[TMP164]]) -// CHECK17-NEXT: ret i32 [[TMP163]] +// CHECK17-NEXT: [[TMP152:%.*]] = load i32, i32* [[A]], align 4 +// CHECK17-NEXT: [[TMP153:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK17-NEXT: call void @llvm.stackrestore(i8* [[TMP153]]) +// CHECK17-NEXT: ret i32 [[TMP152]] // // // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l103 @@ -11476,7 +11332,6 @@ // CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 8 // CHECK17-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 8 // CHECK17-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 8 -// CHECK17-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 8 // CHECK17-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK17-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 // CHECK17-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 @@ -11508,57 +11363,47 @@ // CHECK17-NEXT: [[TMP12:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK17-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to double** // CHECK17-NEXT: store double* [[A]], double** [[TMP13]], align 8 -// CHECK17-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK17-NEXT: store i64 8, i64* [[TMP14]], align 8 -// CHECK17-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK17-NEXT: store i8* null, i8** [[TMP15]], align 8 -// CHECK17-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK17-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i64* -// CHECK17-NEXT: store i64 [[TMP6]], i64* [[TMP17]], align 8 -// CHECK17-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK17-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i64* -// CHECK17-NEXT: store i64 [[TMP6]], i64* [[TMP19]], align 8 -// CHECK17-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK17-NEXT: store i64 4, i64* [[TMP20]], align 8 -// CHECK17-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK17-NEXT: store i8* null, i8** [[TMP21]], align 8 -// CHECK17-NEXT: [[TMP22:%.*]] = 
getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK17-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK17-NEXT: store i8* null, i8** [[TMP14]], align 8 +// CHECK17-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK17-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i64* +// CHECK17-NEXT: store i64 [[TMP6]], i64* [[TMP16]], align 8 +// CHECK17-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK17-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i64* +// CHECK17-NEXT: store i64 [[TMP6]], i64* [[TMP18]], align 8 +// CHECK17-NEXT: [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK17-NEXT: store i8* null, i8** [[TMP19]], align 8 +// CHECK17-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK17-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i64* +// CHECK17-NEXT: store i64 2, i64* [[TMP21]], align 8 +// CHECK17-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK17-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i64* // CHECK17-NEXT: store i64 2, i64* [[TMP23]], align 8 -// CHECK17-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK17-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i64* -// CHECK17-NEXT: store i64 2, i64* [[TMP25]], align 8 -// CHECK17-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK17-NEXT: store i64 8, i64* [[TMP26]], align 8 -// CHECK17-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK17-NEXT: store i8* null, i8** [[TMP27]], align 8 -// CHECK17-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK17-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i64* -// CHECK17-NEXT: store i64 [[TMP2]], i64* [[TMP29]], align 8 -// CHECK17-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK17-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i64* -// CHECK17-NEXT: store i64 [[TMP2]], i64* [[TMP31]], align 8 -// CHECK17-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK17-NEXT: store i64 8, i64* [[TMP32]], align 8 -// CHECK17-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 -// CHECK17-NEXT: store i8* null, i8** [[TMP33]], align 8 -// CHECK17-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK17-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i16** -// CHECK17-NEXT: store i16* [[VLA]], i16** [[TMP35]], align 8 -// CHECK17-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK17-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i16** -// CHECK17-NEXT: store i16* [[VLA]], i16** [[TMP37]], align 8 -// CHECK17-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK17-NEXT: store i64 [[TMP9]], i64* [[TMP38]], align 8 -// CHECK17-NEXT: [[TMP39:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], 
i64 0, i64 4 -// CHECK17-NEXT: store i8* null, i8** [[TMP39]], align 8 -// CHECK17-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK17-NEXT: store i8* null, i8** [[TMP24]], align 8 +// CHECK17-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK17-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i64* +// CHECK17-NEXT: store i64 [[TMP2]], i64* [[TMP26]], align 8 +// CHECK17-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK17-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i64* +// CHECK17-NEXT: store i64 [[TMP2]], i64* [[TMP28]], align 8 +// CHECK17-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 +// CHECK17-NEXT: store i8* null, i8** [[TMP29]], align 8 +// CHECK17-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK17-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i16** +// CHECK17-NEXT: store i16* [[VLA]], i16** [[TMP31]], align 8 +// CHECK17-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK17-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i16** +// CHECK17-NEXT: store i16* [[VLA]], i16** [[TMP33]], align 8 +// CHECK17-NEXT: store i64 [[TMP9]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.13, i32 0, i32 4), align 8 +// CHECK17-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 +// CHECK17-NEXT: store i8* null, i8** [[TMP34]], align 8 +// CHECK17-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK17-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK17-NEXT: [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l218.region_id, i32 5, i8** [[TMP40]], i8** [[TMP41]], i64* [[TMP42]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK17-NEXT: [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0 -// CHECK17-NEXT: br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK17-NEXT: [[TMP37:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l218.region_id, i32 5, i8** [[TMP35]], i8** [[TMP36]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.13, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.14, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK17-NEXT: [[TMP38:%.*]] = icmp ne i32 [[TMP37]], 0 +// CHECK17-NEXT: br i1 [[TMP38]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK17: omp_offload.failed: // CHECK17-NEXT: call void 
@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l218(%struct.S1* [[THIS1]], i64 [[TMP6]], i64 2, i64 [[TMP2]], i16* [[VLA]]) #[[ATTR3]] // CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -11568,15 +11413,15 @@ // CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l218(%struct.S1* [[THIS1]], i64 [[TMP6]], i64 2, i64 [[TMP2]], i16* [[VLA]]) #[[ATTR3]] // CHECK17-NEXT: br label [[OMP_IF_END]] // CHECK17: omp_if.end: -// CHECK17-NEXT: [[TMP45:%.*]] = mul nsw i64 1, [[TMP2]] -// CHECK17-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP45]] +// CHECK17-NEXT: [[TMP39:%.*]] = mul nsw i64 1, [[TMP2]] +// CHECK17-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP39]] // CHECK17-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1 -// CHECK17-NEXT: [[TMP46:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2 -// CHECK17-NEXT: [[CONV3:%.*]] = sext i16 [[TMP46]] to i32 -// CHECK17-NEXT: [[TMP47:%.*]] = load i32, i32* [[B]], align 4 -// CHECK17-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], [[TMP47]] -// CHECK17-NEXT: [[TMP48:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK17-NEXT: call void @llvm.stackrestore(i8* [[TMP48]]) +// CHECK17-NEXT: [[TMP40:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2 +// CHECK17-NEXT: [[CONV3:%.*]] = sext i16 [[TMP40]] to i32 +// CHECK17-NEXT: [[TMP41:%.*]] = load i32, i32* [[B]], align 4 +// CHECK17-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], [[TMP41]] +// CHECK17-NEXT: [[TMP42:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK17-NEXT: call void @llvm.stackrestore(i8* [[TMP42]]) // CHECK17-NEXT: ret i32 [[ADD4]] // // @@ -11681,7 +11526,7 @@ // CHECK17-NEXT: [[ADD8:%.*]] = add i32 [[TMP40]], 1 // CHECK17-NEXT: [[TMP41:%.*]] = zext i32 [[ADD8]] to i64 // CHECK17-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP41]]) -// CHECK17-NEXT: [[TMP42:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l200.region_id, i32 5, i8** [[TMP34]], i8** [[TMP35]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.14, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.15, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK17-NEXT: [[TMP42:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l200.region_id, i32 5, i8** [[TMP34]], i8** [[TMP35]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.16, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK17-NEXT: [[TMP43:%.*]] = icmp ne i32 [[TMP42]], 0 // CHECK17-NEXT: br i1 [[TMP43]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK17: omp_offload.failed: @@ -11752,7 +11597,7 @@ // CHECK17-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK17-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK17-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK17-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l183.region_id, i32 3, i8** 
[[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.17, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.18, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK17-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l183.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.19, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.20, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK17-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0 // CHECK17-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK17: omp_offload.failed: @@ -11791,11 +11636,11 @@ // CHECK17-NEXT: [[CONV3:%.*]] = bitcast i64* [[B_CASTED]] to i32* // CHECK17-NEXT: store i32 [[TMP4]], i32* [[CONV3]], align 4 // CHECK17-NEXT: [[TMP5:%.*]] = load i64, i64* [[B_CASTED]], align 8 -// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], i16* [[TMP3]]) +// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*)* @.omp_outlined..12 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], i16* [[TMP3]]) // CHECK17-NEXT: ret void // // -// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..12 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i64 noundef [[B:%.*]], i64 noundef [[VLA:%.*]], i64 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR2]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -11922,11 +11767,11 @@ // CHECK17-NEXT: [[CONV7:%.*]] = bitcast i64* [[AAA_CASTED]] to i8* // CHECK17-NEXT: store i8 [[TMP7]], i8* [[CONV7]], align 1 // CHECK17-NEXT: [[TMP8:%.*]] = load i64, i64* [[AAA_CASTED]], align 8 -// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, [10 x i32]*)* @.omp_outlined..13 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], i64 [[TMP8]], [10 x i32]* [[TMP0]]) +// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, [10 x i32]*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], i64 [[TMP8]], [10 x i32]* [[TMP0]]) // CHECK17-NEXT: ret void // // -// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..13 +// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..15 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[N:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], i64 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -12072,11 +11917,11 @@ // CHECK17-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16* // CHECK17-NEXT: store i16 [[TMP3]], i16* [[CONV3]], align 2 // CHECK17-NEXT: [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8 -// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]]) +// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]]) // CHECK17-NEXT: ret void // // -// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..16 +// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..18 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -12203,7 +12048,6 @@ // CHECK18-NEXT: [[DOTOFFLOAD_BASEPTRS29:%.*]] = alloca [10 x i8*], align 8 // CHECK18-NEXT: [[DOTOFFLOAD_PTRS30:%.*]] = alloca [10 x i8*], align 8 // CHECK18-NEXT: [[DOTOFFLOAD_MAPPERS31:%.*]] = alloca [10 x i8*], align 8 -// CHECK18-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [10 x i64], align 8 // CHECK18-NEXT: [[_TMP32:%.*]] = alloca i32, align 4 // CHECK18-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]]) // CHECK18-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 @@ -12388,107 +12232,88 @@ // CHECK18-NEXT: [[TMP100:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 0 // CHECK18-NEXT: [[TMP101:%.*]] = bitcast i8** [[TMP100]] to i64* // CHECK18-NEXT: store i64 [[TMP91]], i64* [[TMP101]], align 8 -// CHECK18-NEXT: [[TMP102:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK18-NEXT: store i64 4, i64* [[TMP102]], align 8 -// CHECK18-NEXT: [[TMP103:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 0 -// CHECK18-NEXT: store i8* null, i8** [[TMP103]], align 8 -// CHECK18-NEXT: [[TMP104:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 1 -// CHECK18-NEXT: [[TMP105:%.*]] = bitcast i8** [[TMP104]] to [10 x float]** -// CHECK18-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP105]], align 8 -// CHECK18-NEXT: 
[[TMP106:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 1 -// CHECK18-NEXT: [[TMP107:%.*]] = bitcast i8** [[TMP106]] to [10 x float]** -// CHECK18-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP107]], align 8 -// CHECK18-NEXT: [[TMP108:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK18-NEXT: store i64 40, i64* [[TMP108]], align 8 -// CHECK18-NEXT: [[TMP109:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 1 -// CHECK18-NEXT: store i8* null, i8** [[TMP109]], align 8 -// CHECK18-NEXT: [[TMP110:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 2 +// CHECK18-NEXT: [[TMP102:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 0 +// CHECK18-NEXT: store i8* null, i8** [[TMP102]], align 8 +// CHECK18-NEXT: [[TMP103:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 1 +// CHECK18-NEXT: [[TMP104:%.*]] = bitcast i8** [[TMP103]] to [10 x float]** +// CHECK18-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP104]], align 8 +// CHECK18-NEXT: [[TMP105:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 1 +// CHECK18-NEXT: [[TMP106:%.*]] = bitcast i8** [[TMP105]] to [10 x float]** +// CHECK18-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP106]], align 8 +// CHECK18-NEXT: [[TMP107:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 1 +// CHECK18-NEXT: store i8* null, i8** [[TMP107]], align 8 +// CHECK18-NEXT: [[TMP108:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 2 +// CHECK18-NEXT: [[TMP109:%.*]] = bitcast i8** [[TMP108]] to i64* +// CHECK18-NEXT: store i64 [[TMP2]], i64* [[TMP109]], align 8 +// CHECK18-NEXT: [[TMP110:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 2 // CHECK18-NEXT: [[TMP111:%.*]] = bitcast i8** [[TMP110]] to i64* // CHECK18-NEXT: store i64 [[TMP2]], i64* [[TMP111]], align 8 -// CHECK18-NEXT: [[TMP112:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 2 -// CHECK18-NEXT: [[TMP113:%.*]] = bitcast i8** [[TMP112]] to i64* -// CHECK18-NEXT: store i64 [[TMP2]], i64* [[TMP113]], align 8 -// CHECK18-NEXT: [[TMP114:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK18-NEXT: store i64 8, i64* [[TMP114]], align 8 -// CHECK18-NEXT: [[TMP115:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 2 -// CHECK18-NEXT: store i8* null, i8** [[TMP115]], align 8 -// CHECK18-NEXT: [[TMP116:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 3 -// CHECK18-NEXT: [[TMP117:%.*]] = bitcast i8** [[TMP116]] to float** -// CHECK18-NEXT: store float* [[VLA]], float** [[TMP117]], align 8 -// CHECK18-NEXT: [[TMP118:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 3 -// CHECK18-NEXT: [[TMP119:%.*]] = bitcast i8** [[TMP118]] to float** -// CHECK18-NEXT: store float* [[VLA]], float** [[TMP119]], align 8 -// CHECK18-NEXT: [[TMP120:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK18-NEXT: store i64 [[TMP95]], i64* [[TMP120]], align 8 -// CHECK18-NEXT: [[TMP121:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 
0, i64 3 -// CHECK18-NEXT: store i8* null, i8** [[TMP121]], align 8 -// CHECK18-NEXT: [[TMP122:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 4 -// CHECK18-NEXT: [[TMP123:%.*]] = bitcast i8** [[TMP122]] to [5 x [10 x double]]** -// CHECK18-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP123]], align 8 -// CHECK18-NEXT: [[TMP124:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 4 -// CHECK18-NEXT: [[TMP125:%.*]] = bitcast i8** [[TMP124]] to [5 x [10 x double]]** -// CHECK18-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP125]], align 8 -// CHECK18-NEXT: [[TMP126:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK18-NEXT: store i64 400, i64* [[TMP126]], align 8 -// CHECK18-NEXT: [[TMP127:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 4 +// CHECK18-NEXT: [[TMP112:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 2 +// CHECK18-NEXT: store i8* null, i8** [[TMP112]], align 8 +// CHECK18-NEXT: [[TMP113:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 3 +// CHECK18-NEXT: [[TMP114:%.*]] = bitcast i8** [[TMP113]] to float** +// CHECK18-NEXT: store float* [[VLA]], float** [[TMP114]], align 8 +// CHECK18-NEXT: [[TMP115:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 3 +// CHECK18-NEXT: [[TMP116:%.*]] = bitcast i8** [[TMP115]] to float** +// CHECK18-NEXT: store float* [[VLA]], float** [[TMP116]], align 8 +// CHECK18-NEXT: store i64 [[TMP95]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_sizes.10, i32 0, i32 3), align 8 +// CHECK18-NEXT: [[TMP117:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 3 +// CHECK18-NEXT: store i8* null, i8** [[TMP117]], align 8 +// CHECK18-NEXT: [[TMP118:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 4 +// CHECK18-NEXT: [[TMP119:%.*]] = bitcast i8** [[TMP118]] to [5 x [10 x double]]** +// CHECK18-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP119]], align 8 +// CHECK18-NEXT: [[TMP120:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 4 +// CHECK18-NEXT: [[TMP121:%.*]] = bitcast i8** [[TMP120]] to [5 x [10 x double]]** +// CHECK18-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP121]], align 8 +// CHECK18-NEXT: [[TMP122:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 4 +// CHECK18-NEXT: store i8* null, i8** [[TMP122]], align 8 +// CHECK18-NEXT: [[TMP123:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 5 +// CHECK18-NEXT: [[TMP124:%.*]] = bitcast i8** [[TMP123]] to i64* +// CHECK18-NEXT: store i64 5, i64* [[TMP124]], align 8 +// CHECK18-NEXT: [[TMP125:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 5 +// CHECK18-NEXT: [[TMP126:%.*]] = bitcast i8** [[TMP125]] to i64* +// CHECK18-NEXT: store i64 5, i64* [[TMP126]], align 8 +// CHECK18-NEXT: [[TMP127:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 5 // CHECK18-NEXT: store i8* null, i8** [[TMP127]], align 8 -// CHECK18-NEXT: [[TMP128:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 5 +// CHECK18-NEXT: 
[[TMP128:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 6 // CHECK18-NEXT: [[TMP129:%.*]] = bitcast i8** [[TMP128]] to i64* -// CHECK18-NEXT: store i64 5, i64* [[TMP129]], align 8 -// CHECK18-NEXT: [[TMP130:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 5 +// CHECK18-NEXT: store i64 [[TMP5]], i64* [[TMP129]], align 8 +// CHECK18-NEXT: [[TMP130:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 6 // CHECK18-NEXT: [[TMP131:%.*]] = bitcast i8** [[TMP130]] to i64* -// CHECK18-NEXT: store i64 5, i64* [[TMP131]], align 8 -// CHECK18-NEXT: [[TMP132:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5 -// CHECK18-NEXT: store i64 8, i64* [[TMP132]], align 8 -// CHECK18-NEXT: [[TMP133:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 5 -// CHECK18-NEXT: store i8* null, i8** [[TMP133]], align 8 -// CHECK18-NEXT: [[TMP134:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 6 -// CHECK18-NEXT: [[TMP135:%.*]] = bitcast i8** [[TMP134]] to i64* -// CHECK18-NEXT: store i64 [[TMP5]], i64* [[TMP135]], align 8 -// CHECK18-NEXT: [[TMP136:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 6 -// CHECK18-NEXT: [[TMP137:%.*]] = bitcast i8** [[TMP136]] to i64* -// CHECK18-NEXT: store i64 [[TMP5]], i64* [[TMP137]], align 8 -// CHECK18-NEXT: [[TMP138:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6 -// CHECK18-NEXT: store i64 8, i64* [[TMP138]], align 8 -// CHECK18-NEXT: [[TMP139:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 6 -// CHECK18-NEXT: store i8* null, i8** [[TMP139]], align 8 -// CHECK18-NEXT: [[TMP140:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 7 -// CHECK18-NEXT: [[TMP141:%.*]] = bitcast i8** [[TMP140]] to double** -// CHECK18-NEXT: store double* [[VLA1]], double** [[TMP141]], align 8 -// CHECK18-NEXT: [[TMP142:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 7 -// CHECK18-NEXT: [[TMP143:%.*]] = bitcast i8** [[TMP142]] to double** -// CHECK18-NEXT: store double* [[VLA1]], double** [[TMP143]], align 8 -// CHECK18-NEXT: [[TMP144:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7 -// CHECK18-NEXT: store i64 [[TMP97]], i64* [[TMP144]], align 8 -// CHECK18-NEXT: [[TMP145:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 7 -// CHECK18-NEXT: store i8* null, i8** [[TMP145]], align 8 -// CHECK18-NEXT: [[TMP146:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 8 -// CHECK18-NEXT: [[TMP147:%.*]] = bitcast i8** [[TMP146]] to %struct.TT** -// CHECK18-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP147]], align 8 -// CHECK18-NEXT: [[TMP148:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 8 -// CHECK18-NEXT: [[TMP149:%.*]] = bitcast i8** [[TMP148]] to %struct.TT** -// CHECK18-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP149]], align 8 -// CHECK18-NEXT: [[TMP150:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 8 -// CHECK18-NEXT: store i64 16, i64* [[TMP150]], align 8 -// CHECK18-NEXT: [[TMP151:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* 
[[DOTOFFLOAD_MAPPERS31]], i64 0, i64 8 -// CHECK18-NEXT: store i8* null, i8** [[TMP151]], align 8 -// CHECK18-NEXT: [[TMP152:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 9 -// CHECK18-NEXT: [[TMP153:%.*]] = bitcast i8** [[TMP152]] to i64* -// CHECK18-NEXT: store i64 [[TMP93]], i64* [[TMP153]], align 8 -// CHECK18-NEXT: [[TMP154:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 9 -// CHECK18-NEXT: [[TMP155:%.*]] = bitcast i8** [[TMP154]] to i64* -// CHECK18-NEXT: store i64 [[TMP93]], i64* [[TMP155]], align 8 -// CHECK18-NEXT: [[TMP156:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 9 -// CHECK18-NEXT: store i64 4, i64* [[TMP156]], align 8 -// CHECK18-NEXT: [[TMP157:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 9 -// CHECK18-NEXT: store i8* null, i8** [[TMP157]], align 8 -// CHECK18-NEXT: [[TMP158:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP159:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP160:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 +// CHECK18-NEXT: store i64 [[TMP5]], i64* [[TMP131]], align 8 +// CHECK18-NEXT: [[TMP132:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 6 +// CHECK18-NEXT: store i8* null, i8** [[TMP132]], align 8 +// CHECK18-NEXT: [[TMP133:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 7 +// CHECK18-NEXT: [[TMP134:%.*]] = bitcast i8** [[TMP133]] to double** +// CHECK18-NEXT: store double* [[VLA1]], double** [[TMP134]], align 8 +// CHECK18-NEXT: [[TMP135:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 7 +// CHECK18-NEXT: [[TMP136:%.*]] = bitcast i8** [[TMP135]] to double** +// CHECK18-NEXT: store double* [[VLA1]], double** [[TMP136]], align 8 +// CHECK18-NEXT: store i64 [[TMP97]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_sizes.10, i32 0, i32 7), align 8 +// CHECK18-NEXT: [[TMP137:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 7 +// CHECK18-NEXT: store i8* null, i8** [[TMP137]], align 8 +// CHECK18-NEXT: [[TMP138:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 8 +// CHECK18-NEXT: [[TMP139:%.*]] = bitcast i8** [[TMP138]] to %struct.TT** +// CHECK18-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP139]], align 8 +// CHECK18-NEXT: [[TMP140:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 8 +// CHECK18-NEXT: [[TMP141:%.*]] = bitcast i8** [[TMP140]] to %struct.TT** +// CHECK18-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP141]], align 8 +// CHECK18-NEXT: [[TMP142:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 8 +// CHECK18-NEXT: store i8* null, i8** [[TMP142]], align 8 +// CHECK18-NEXT: [[TMP143:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 9 +// CHECK18-NEXT: [[TMP144:%.*]] = bitcast i8** [[TMP143]] to i64* +// CHECK18-NEXT: store i64 [[TMP93]], i64* [[TMP144]], align 8 +// CHECK18-NEXT: [[TMP145:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 9 +// CHECK18-NEXT: [[TMP146:%.*]] = bitcast i8** [[TMP145]] to i64* 
+// CHECK18-NEXT: store i64 [[TMP93]], i64* [[TMP146]], align 8 +// CHECK18-NEXT: [[TMP147:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS31]], i64 0, i64 9 +// CHECK18-NEXT: store i8* null, i8** [[TMP147]], align 8 +// CHECK18-NEXT: [[TMP148:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS29]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP149:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS30]], i32 0, i32 0 // CHECK18-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK18-NEXT: [[TMP161:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l145.region_id, i32 10, i8** [[TMP158]], i8** [[TMP159]], i64* [[TMP160]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK18-NEXT: [[TMP162:%.*]] = icmp ne i32 [[TMP161]], 0 -// CHECK18-NEXT: br i1 [[TMP162]], label [[OMP_OFFLOAD_FAILED33:%.*]], label [[OMP_OFFLOAD_CONT34:%.*]] +// CHECK18-NEXT: [[TMP150:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l145.region_id, i32 10, i8** [[TMP148]], i8** [[TMP149]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_sizes.10, i32 0, i32 0), i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_maptypes.11, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK18-NEXT: [[TMP151:%.*]] = icmp ne i32 [[TMP150]], 0 +// CHECK18-NEXT: br i1 [[TMP151]], label [[OMP_OFFLOAD_FAILED33:%.*]], label [[OMP_OFFLOAD_CONT34:%.*]] // CHECK18: omp_offload.failed33: // CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l145(i64 [[TMP91]], [10 x float]* [[B]], i64 [[TMP2]], float* [[VLA]], [5 x [10 x double]]* [[C]], i64 5, i64 [[TMP5]], double* [[VLA1]], %struct.TT* [[D]], i64 [[TMP93]]) #[[ATTR3]] // CHECK18-NEXT: br label [[OMP_OFFLOAD_CONT34]] @@ -12498,10 +12323,10 @@ // CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l145(i64 [[TMP91]], [10 x float]* [[B]], i64 [[TMP2]], float* [[VLA]], [5 x [10 x double]]* [[C]], i64 5, i64 [[TMP5]], double* [[VLA1]], %struct.TT* [[D]], i64 [[TMP93]]) #[[ATTR3]] // CHECK18-NEXT: br label [[OMP_IF_END36]] // CHECK18: omp_if.end36: -// CHECK18-NEXT: [[TMP163:%.*]] = load i32, i32* [[A]], align 4 -// CHECK18-NEXT: [[TMP164:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK18-NEXT: call void @llvm.stackrestore(i8* [[TMP164]]) -// CHECK18-NEXT: ret i32 [[TMP163]] +// CHECK18-NEXT: [[TMP152:%.*]] = load i32, i32* [[A]], align 4 +// CHECK18-NEXT: [[TMP153:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK18-NEXT: call void @llvm.stackrestore(i8* [[TMP153]]) +// CHECK18-NEXT: ret i32 [[TMP152]] // // // CHECK18-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l103 @@ -13196,7 +13021,6 @@ // CHECK18-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 8 // CHECK18-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 8 // CHECK18-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 8 -// CHECK18-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 8 // CHECK18-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK18-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 // CHECK18-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 @@ -13228,57 +13052,47 @@ // CHECK18-NEXT: 
[[TMP12:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK18-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to double** // CHECK18-NEXT: store double* [[A]], double** [[TMP13]], align 8 -// CHECK18-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK18-NEXT: store i64 8, i64* [[TMP14]], align 8 -// CHECK18-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK18-NEXT: store i8* null, i8** [[TMP15]], align 8 -// CHECK18-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK18-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i64* -// CHECK18-NEXT: store i64 [[TMP6]], i64* [[TMP17]], align 8 -// CHECK18-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK18-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i64* -// CHECK18-NEXT: store i64 [[TMP6]], i64* [[TMP19]], align 8 -// CHECK18-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK18-NEXT: store i64 4, i64* [[TMP20]], align 8 -// CHECK18-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK18-NEXT: store i8* null, i8** [[TMP21]], align 8 -// CHECK18-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK18-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK18-NEXT: store i8* null, i8** [[TMP14]], align 8 +// CHECK18-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK18-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i64* +// CHECK18-NEXT: store i64 [[TMP6]], i64* [[TMP16]], align 8 +// CHECK18-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK18-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i64* +// CHECK18-NEXT: store i64 [[TMP6]], i64* [[TMP18]], align 8 +// CHECK18-NEXT: [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK18-NEXT: store i8* null, i8** [[TMP19]], align 8 +// CHECK18-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK18-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i64* +// CHECK18-NEXT: store i64 2, i64* [[TMP21]], align 8 +// CHECK18-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK18-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i64* // CHECK18-NEXT: store i64 2, i64* [[TMP23]], align 8 -// CHECK18-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK18-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i64* -// CHECK18-NEXT: store i64 2, i64* [[TMP25]], align 8 -// CHECK18-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK18-NEXT: store i64 8, i64* [[TMP26]], align 8 -// CHECK18-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK18-NEXT: store i8* null, i8** [[TMP27]], align 8 -// CHECK18-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK18-NEXT: [[TMP29:%.*]] = bitcast 
i8** [[TMP28]] to i64* -// CHECK18-NEXT: store i64 [[TMP2]], i64* [[TMP29]], align 8 -// CHECK18-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK18-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i64* -// CHECK18-NEXT: store i64 [[TMP2]], i64* [[TMP31]], align 8 -// CHECK18-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK18-NEXT: store i64 8, i64* [[TMP32]], align 8 -// CHECK18-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 -// CHECK18-NEXT: store i8* null, i8** [[TMP33]], align 8 -// CHECK18-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK18-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i16** -// CHECK18-NEXT: store i16* [[VLA]], i16** [[TMP35]], align 8 -// CHECK18-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK18-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i16** -// CHECK18-NEXT: store i16* [[VLA]], i16** [[TMP37]], align 8 -// CHECK18-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK18-NEXT: store i64 [[TMP9]], i64* [[TMP38]], align 8 -// CHECK18-NEXT: [[TMP39:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 -// CHECK18-NEXT: store i8* null, i8** [[TMP39]], align 8 -// CHECK18-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK18-NEXT: store i8* null, i8** [[TMP24]], align 8 +// CHECK18-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK18-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i64* +// CHECK18-NEXT: store i64 [[TMP2]], i64* [[TMP26]], align 8 +// CHECK18-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK18-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i64* +// CHECK18-NEXT: store i64 [[TMP2]], i64* [[TMP28]], align 8 +// CHECK18-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 +// CHECK18-NEXT: store i8* null, i8** [[TMP29]], align 8 +// CHECK18-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK18-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i16** +// CHECK18-NEXT: store i16* [[VLA]], i16** [[TMP31]], align 8 +// CHECK18-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK18-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i16** +// CHECK18-NEXT: store i16* [[VLA]], i16** [[TMP33]], align 8 +// CHECK18-NEXT: store i64 [[TMP9]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.13, i32 0, i32 4), align 8 +// CHECK18-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 +// CHECK18-NEXT: store i8* null, i8** [[TMP34]], align 8 +// CHECK18-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* 
[[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK18-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK18-NEXT: [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l218.region_id, i32 5, i8** [[TMP40]], i8** [[TMP41]], i64* [[TMP42]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK18-NEXT: [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0 -// CHECK18-NEXT: br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK18-NEXT: [[TMP37:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l218.region_id, i32 5, i8** [[TMP35]], i8** [[TMP36]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.13, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.14, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK18-NEXT: [[TMP38:%.*]] = icmp ne i32 [[TMP37]], 0 +// CHECK18-NEXT: br i1 [[TMP38]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK18: omp_offload.failed: // CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l218(%struct.S1* [[THIS1]], i64 [[TMP6]], i64 2, i64 [[TMP2]], i16* [[VLA]]) #[[ATTR3]] // CHECK18-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -13288,15 +13102,15 @@ // CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l218(%struct.S1* [[THIS1]], i64 [[TMP6]], i64 2, i64 [[TMP2]], i16* [[VLA]]) #[[ATTR3]] // CHECK18-NEXT: br label [[OMP_IF_END]] // CHECK18: omp_if.end: -// CHECK18-NEXT: [[TMP45:%.*]] = mul nsw i64 1, [[TMP2]] -// CHECK18-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP45]] +// CHECK18-NEXT: [[TMP39:%.*]] = mul nsw i64 1, [[TMP2]] +// CHECK18-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP39]] // CHECK18-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1 -// CHECK18-NEXT: [[TMP46:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2 -// CHECK18-NEXT: [[CONV3:%.*]] = sext i16 [[TMP46]] to i32 -// CHECK18-NEXT: [[TMP47:%.*]] = load i32, i32* [[B]], align 4 -// CHECK18-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], [[TMP47]] -// CHECK18-NEXT: [[TMP48:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK18-NEXT: call void @llvm.stackrestore(i8* [[TMP48]]) +// CHECK18-NEXT: [[TMP40:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2 +// CHECK18-NEXT: [[CONV3:%.*]] = sext i16 [[TMP40]] to i32 +// CHECK18-NEXT: [[TMP41:%.*]] = load i32, i32* [[B]], align 4 +// CHECK18-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], [[TMP41]] +// CHECK18-NEXT: [[TMP42:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK18-NEXT: call void @llvm.stackrestore(i8* [[TMP42]]) // CHECK18-NEXT: ret i32 [[ADD4]] // // @@ -13401,7 +13215,7 @@ // CHECK18-NEXT: [[ADD8:%.*]] = add i32 [[TMP40]], 1 // CHECK18-NEXT: [[TMP41:%.*]] = zext i32 [[ADD8]] to i64 // CHECK18-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP41]]) -// CHECK18-NEXT: [[TMP42:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l200.region_id, i32 5, i8** 
[[TMP34]], i8** [[TMP35]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.14, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.15, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK18-NEXT: [[TMP42:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l200.region_id, i32 5, i8** [[TMP34]], i8** [[TMP35]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.16, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK18-NEXT: [[TMP43:%.*]] = icmp ne i32 [[TMP42]], 0 // CHECK18-NEXT: br i1 [[TMP43]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK18: omp_offload.failed: @@ -13472,7 +13286,7 @@ // CHECK18-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK18-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK18-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK18-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l183.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.17, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.18, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK18-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l183.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.19, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.20, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK18-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0 // CHECK18-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK18: omp_offload.failed: @@ -13511,11 +13325,11 @@ // CHECK18-NEXT: [[CONV3:%.*]] = bitcast i64* [[B_CASTED]] to i32* // CHECK18-NEXT: store i32 [[TMP4]], i32* [[CONV3]], align 4 // CHECK18-NEXT: [[TMP5:%.*]] = load i64, i64* [[B_CASTED]], align 8 -// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], i16* [[TMP3]]) +// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*)* @.omp_outlined..12 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], i16* [[TMP3]]) // CHECK18-NEXT: ret void // // -// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..12 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i64 noundef [[B:%.*]], i64 noundef [[VLA:%.*]], i64 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR2]] { // CHECK18-NEXT: entry: // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -13642,11 +13456,11 @@ // CHECK18-NEXT: [[CONV7:%.*]] = bitcast i64* [[AAA_CASTED]] to i8* // CHECK18-NEXT: store i8 [[TMP7]], i8* [[CONV7]], align 1 // CHECK18-NEXT: [[TMP8:%.*]] = load i64, i64* [[AAA_CASTED]], align 8 -// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, [10 x i32]*)* @.omp_outlined..13 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], i64 [[TMP8]], [10 x i32]* [[TMP0]]) +// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, [10 x i32]*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], i64 [[TMP8]], [10 x i32]* [[TMP0]]) // CHECK18-NEXT: ret void // // -// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..13 +// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..15 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[N:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], i64 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] { // CHECK18-NEXT: entry: // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -13792,11 +13606,11 @@ // CHECK18-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16* // CHECK18-NEXT: store i16 [[TMP3]], i16* [[CONV3]], align 2 // CHECK18-NEXT: [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8 -// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]]) +// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]]) // CHECK18-NEXT: ret void // // -// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..16 +// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..18 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] { // CHECK18-NEXT: entry: // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -13923,7 +13737,6 @@ // CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS23:%.*]] = alloca [10 x i8*], align 4 // CHECK19-NEXT: [[DOTOFFLOAD_PTRS24:%.*]] = alloca [10 x i8*], align 4 // CHECK19-NEXT: [[DOTOFFLOAD_MAPPERS25:%.*]] = alloca [10 x i8*], align 4 -// CHECK19-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [10 x i64], align 4 // CHECK19-NEXT: [[_TMP26:%.*]] = alloca i32, align 4 // CHECK19-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]]) // CHECK19-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 @@ -14102,107 +13915,88 @@ // CHECK19-NEXT: [[TMP100:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS24]], i32 0, i32 0 // CHECK19-NEXT: [[TMP101:%.*]] = bitcast i8** [[TMP100]] to i32* // CHECK19-NEXT: store i32 [[TMP89]], i32* [[TMP101]], align 4 -// CHECK19-NEXT: [[TMP102:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK19-NEXT: store i64 4, i64* [[TMP102]], align 4 -// CHECK19-NEXT: [[TMP103:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS25]], i32 0, i32 0 -// CHECK19-NEXT: store i8* null, i8** [[TMP103]], align 4 -// CHECK19-NEXT: [[TMP104:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 1 -// CHECK19-NEXT: [[TMP105:%.*]] = bitcast i8** [[TMP104]] to [10 x float]** -// CHECK19-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP105]], align 4 -// CHECK19-NEXT: [[TMP106:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS24]], i32 0, i32 1 -// CHECK19-NEXT: [[TMP107:%.*]] = bitcast i8** [[TMP106]] to [10 x float]** -// CHECK19-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP107]], align 4 -// CHECK19-NEXT: [[TMP108:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK19-NEXT: store i64 40, i64* [[TMP108]], align 4 -// CHECK19-NEXT: [[TMP109:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS25]], i32 0, i32 1 -// CHECK19-NEXT: store i8* null, i8** [[TMP109]], align 4 -// CHECK19-NEXT: [[TMP110:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 2 +// CHECK19-NEXT: [[TMP102:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS25]], i32 0, i32 0 +// CHECK19-NEXT: store i8* null, i8** [[TMP102]], align 4 +// CHECK19-NEXT: [[TMP103:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 1 +// CHECK19-NEXT: [[TMP104:%.*]] = bitcast i8** [[TMP103]] to [10 x float]** +// CHECK19-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP104]], align 4 +// CHECK19-NEXT: [[TMP105:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS24]], i32 0, i32 1 +// CHECK19-NEXT: [[TMP106:%.*]] = bitcast i8** [[TMP105]] to [10 x float]** +// 
CHECK19-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP106]], align 4 +// CHECK19-NEXT: [[TMP107:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS25]], i32 0, i32 1 +// CHECK19-NEXT: store i8* null, i8** [[TMP107]], align 4 +// CHECK19-NEXT: [[TMP108:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 2 +// CHECK19-NEXT: [[TMP109:%.*]] = bitcast i8** [[TMP108]] to i32* +// CHECK19-NEXT: store i32 [[TMP1]], i32* [[TMP109]], align 4 +// CHECK19-NEXT: [[TMP110:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS24]], i32 0, i32 2 // CHECK19-NEXT: [[TMP111:%.*]] = bitcast i8** [[TMP110]] to i32* // CHECK19-NEXT: store i32 [[TMP1]], i32* [[TMP111]], align 4 -// CHECK19-NEXT: [[TMP112:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS24]], i32 0, i32 2 -// CHECK19-NEXT: [[TMP113:%.*]] = bitcast i8** [[TMP112]] to i32* -// CHECK19-NEXT: store i32 [[TMP1]], i32* [[TMP113]], align 4 -// CHECK19-NEXT: [[TMP114:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK19-NEXT: store i64 4, i64* [[TMP114]], align 4 -// CHECK19-NEXT: [[TMP115:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS25]], i32 0, i32 2 -// CHECK19-NEXT: store i8* null, i8** [[TMP115]], align 4 -// CHECK19-NEXT: [[TMP116:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 3 -// CHECK19-NEXT: [[TMP117:%.*]] = bitcast i8** [[TMP116]] to float** -// CHECK19-NEXT: store float* [[VLA]], float** [[TMP117]], align 4 -// CHECK19-NEXT: [[TMP118:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS24]], i32 0, i32 3 -// CHECK19-NEXT: [[TMP119:%.*]] = bitcast i8** [[TMP118]] to float** -// CHECK19-NEXT: store float* [[VLA]], float** [[TMP119]], align 4 -// CHECK19-NEXT: [[TMP120:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK19-NEXT: store i64 [[TMP94]], i64* [[TMP120]], align 4 -// CHECK19-NEXT: [[TMP121:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS25]], i32 0, i32 3 -// CHECK19-NEXT: store i8* null, i8** [[TMP121]], align 4 -// CHECK19-NEXT: [[TMP122:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 4 -// CHECK19-NEXT: [[TMP123:%.*]] = bitcast i8** [[TMP122]] to [5 x [10 x double]]** -// CHECK19-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP123]], align 4 -// CHECK19-NEXT: [[TMP124:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS24]], i32 0, i32 4 -// CHECK19-NEXT: [[TMP125:%.*]] = bitcast i8** [[TMP124]] to [5 x [10 x double]]** -// CHECK19-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP125]], align 4 -// CHECK19-NEXT: [[TMP126:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK19-NEXT: store i64 400, i64* [[TMP126]], align 4 -// CHECK19-NEXT: [[TMP127:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS25]], i32 0, i32 4 +// CHECK19-NEXT: [[TMP112:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS25]], i32 0, i32 2 +// CHECK19-NEXT: store i8* null, i8** [[TMP112]], align 4 +// CHECK19-NEXT: [[TMP113:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 3 +// CHECK19-NEXT: [[TMP114:%.*]] = bitcast i8** [[TMP113]] to float** +// CHECK19-NEXT: store float* [[VLA]], float** 
[[TMP114]], align 4 +// CHECK19-NEXT: [[TMP115:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS24]], i32 0, i32 3 +// CHECK19-NEXT: [[TMP116:%.*]] = bitcast i8** [[TMP115]] to float** +// CHECK19-NEXT: store float* [[VLA]], float** [[TMP116]], align 4 +// CHECK19-NEXT: store i64 [[TMP94]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_sizes.10, i32 0, i32 3), align 4 +// CHECK19-NEXT: [[TMP117:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS25]], i32 0, i32 3 +// CHECK19-NEXT: store i8* null, i8** [[TMP117]], align 4 +// CHECK19-NEXT: [[TMP118:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 4 +// CHECK19-NEXT: [[TMP119:%.*]] = bitcast i8** [[TMP118]] to [5 x [10 x double]]** +// CHECK19-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP119]], align 4 +// CHECK19-NEXT: [[TMP120:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS24]], i32 0, i32 4 +// CHECK19-NEXT: [[TMP121:%.*]] = bitcast i8** [[TMP120]] to [5 x [10 x double]]** +// CHECK19-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP121]], align 4 +// CHECK19-NEXT: [[TMP122:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS25]], i32 0, i32 4 +// CHECK19-NEXT: store i8* null, i8** [[TMP122]], align 4 +// CHECK19-NEXT: [[TMP123:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 5 +// CHECK19-NEXT: [[TMP124:%.*]] = bitcast i8** [[TMP123]] to i32* +// CHECK19-NEXT: store i32 5, i32* [[TMP124]], align 4 +// CHECK19-NEXT: [[TMP125:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS24]], i32 0, i32 5 +// CHECK19-NEXT: [[TMP126:%.*]] = bitcast i8** [[TMP125]] to i32* +// CHECK19-NEXT: store i32 5, i32* [[TMP126]], align 4 +// CHECK19-NEXT: [[TMP127:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS25]], i32 0, i32 5 // CHECK19-NEXT: store i8* null, i8** [[TMP127]], align 4 -// CHECK19-NEXT: [[TMP128:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 5 +// CHECK19-NEXT: [[TMP128:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 6 // CHECK19-NEXT: [[TMP129:%.*]] = bitcast i8** [[TMP128]] to i32* -// CHECK19-NEXT: store i32 5, i32* [[TMP129]], align 4 -// CHECK19-NEXT: [[TMP130:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS24]], i32 0, i32 5 +// CHECK19-NEXT: store i32 [[TMP3]], i32* [[TMP129]], align 4 +// CHECK19-NEXT: [[TMP130:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS24]], i32 0, i32 6 // CHECK19-NEXT: [[TMP131:%.*]] = bitcast i8** [[TMP130]] to i32* -// CHECK19-NEXT: store i32 5, i32* [[TMP131]], align 4 -// CHECK19-NEXT: [[TMP132:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5 -// CHECK19-NEXT: store i64 4, i64* [[TMP132]], align 4 -// CHECK19-NEXT: [[TMP133:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS25]], i32 0, i32 5 -// CHECK19-NEXT: store i8* null, i8** [[TMP133]], align 4 -// CHECK19-NEXT: [[TMP134:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 6 -// CHECK19-NEXT: [[TMP135:%.*]] = bitcast i8** [[TMP134]] to i32* -// CHECK19-NEXT: store i32 [[TMP3]], i32* [[TMP135]], align 4 -// CHECK19-NEXT: [[TMP136:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS24]], i32 0, i32 6 -// 
CHECK19-NEXT: [[TMP137:%.*]] = bitcast i8** [[TMP136]] to i32* -// CHECK19-NEXT: store i32 [[TMP3]], i32* [[TMP137]], align 4 -// CHECK19-NEXT: [[TMP138:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6 -// CHECK19-NEXT: store i64 4, i64* [[TMP138]], align 4 -// CHECK19-NEXT: [[TMP139:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS25]], i32 0, i32 6 -// CHECK19-NEXT: store i8* null, i8** [[TMP139]], align 4 -// CHECK19-NEXT: [[TMP140:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 7 -// CHECK19-NEXT: [[TMP141:%.*]] = bitcast i8** [[TMP140]] to double** -// CHECK19-NEXT: store double* [[VLA1]], double** [[TMP141]], align 4 -// CHECK19-NEXT: [[TMP142:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS24]], i32 0, i32 7 -// CHECK19-NEXT: [[TMP143:%.*]] = bitcast i8** [[TMP142]] to double** -// CHECK19-NEXT: store double* [[VLA1]], double** [[TMP143]], align 4 -// CHECK19-NEXT: [[TMP144:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7 -// CHECK19-NEXT: store i64 [[TMP97]], i64* [[TMP144]], align 4 -// CHECK19-NEXT: [[TMP145:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS25]], i32 0, i32 7 -// CHECK19-NEXT: store i8* null, i8** [[TMP145]], align 4 -// CHECK19-NEXT: [[TMP146:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 8 -// CHECK19-NEXT: [[TMP147:%.*]] = bitcast i8** [[TMP146]] to %struct.TT** -// CHECK19-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP147]], align 4 -// CHECK19-NEXT: [[TMP148:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS24]], i32 0, i32 8 -// CHECK19-NEXT: [[TMP149:%.*]] = bitcast i8** [[TMP148]] to %struct.TT** -// CHECK19-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP149]], align 4 -// CHECK19-NEXT: [[TMP150:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 8 -// CHECK19-NEXT: store i64 12, i64* [[TMP150]], align 4 -// CHECK19-NEXT: [[TMP151:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS25]], i32 0, i32 8 -// CHECK19-NEXT: store i8* null, i8** [[TMP151]], align 4 -// CHECK19-NEXT: [[TMP152:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 9 -// CHECK19-NEXT: [[TMP153:%.*]] = bitcast i8** [[TMP152]] to i32* -// CHECK19-NEXT: store i32 [[TMP91]], i32* [[TMP153]], align 4 -// CHECK19-NEXT: [[TMP154:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS24]], i32 0, i32 9 -// CHECK19-NEXT: [[TMP155:%.*]] = bitcast i8** [[TMP154]] to i32* -// CHECK19-NEXT: store i32 [[TMP91]], i32* [[TMP155]], align 4 -// CHECK19-NEXT: [[TMP156:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 9 -// CHECK19-NEXT: store i64 4, i64* [[TMP156]], align 4 -// CHECK19-NEXT: [[TMP157:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS25]], i32 0, i32 9 -// CHECK19-NEXT: store i8* null, i8** [[TMP157]], align 4 -// CHECK19-NEXT: [[TMP158:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP159:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS24]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP160:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 +// CHECK19-NEXT: store i32 [[TMP3]], i32* [[TMP131]], align 4 +// CHECK19-NEXT: 
[[TMP132:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS25]], i32 0, i32 6 +// CHECK19-NEXT: store i8* null, i8** [[TMP132]], align 4 +// CHECK19-NEXT: [[TMP133:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 7 +// CHECK19-NEXT: [[TMP134:%.*]] = bitcast i8** [[TMP133]] to double** +// CHECK19-NEXT: store double* [[VLA1]], double** [[TMP134]], align 4 +// CHECK19-NEXT: [[TMP135:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS24]], i32 0, i32 7 +// CHECK19-NEXT: [[TMP136:%.*]] = bitcast i8** [[TMP135]] to double** +// CHECK19-NEXT: store double* [[VLA1]], double** [[TMP136]], align 4 +// CHECK19-NEXT: store i64 [[TMP97]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_sizes.10, i32 0, i32 7), align 4 +// CHECK19-NEXT: [[TMP137:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS25]], i32 0, i32 7 +// CHECK19-NEXT: store i8* null, i8** [[TMP137]], align 4 +// CHECK19-NEXT: [[TMP138:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 8 +// CHECK19-NEXT: [[TMP139:%.*]] = bitcast i8** [[TMP138]] to %struct.TT** +// CHECK19-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP139]], align 4 +// CHECK19-NEXT: [[TMP140:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS24]], i32 0, i32 8 +// CHECK19-NEXT: [[TMP141:%.*]] = bitcast i8** [[TMP140]] to %struct.TT** +// CHECK19-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP141]], align 4 +// CHECK19-NEXT: [[TMP142:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS25]], i32 0, i32 8 +// CHECK19-NEXT: store i8* null, i8** [[TMP142]], align 4 +// CHECK19-NEXT: [[TMP143:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 9 +// CHECK19-NEXT: [[TMP144:%.*]] = bitcast i8** [[TMP143]] to i32* +// CHECK19-NEXT: store i32 [[TMP91]], i32* [[TMP144]], align 4 +// CHECK19-NEXT: [[TMP145:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS24]], i32 0, i32 9 +// CHECK19-NEXT: [[TMP146:%.*]] = bitcast i8** [[TMP145]] to i32* +// CHECK19-NEXT: store i32 [[TMP91]], i32* [[TMP146]], align 4 +// CHECK19-NEXT: [[TMP147:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS25]], i32 0, i32 9 +// CHECK19-NEXT: store i8* null, i8** [[TMP147]], align 4 +// CHECK19-NEXT: [[TMP148:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP149:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS24]], i32 0, i32 0 // CHECK19-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK19-NEXT: [[TMP161:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l145.region_id, i32 10, i8** [[TMP158]], i8** [[TMP159]], i64* [[TMP160]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK19-NEXT: [[TMP162:%.*]] = icmp ne i32 [[TMP161]], 0 -// CHECK19-NEXT: br i1 [[TMP162]], label [[OMP_OFFLOAD_FAILED27:%.*]], label [[OMP_OFFLOAD_CONT28:%.*]] +// CHECK19-NEXT: [[TMP150:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l145.region_id, i32 10, i8** [[TMP148]], i8** [[TMP149]], i64* getelementptr inbounds ([10 x i64], [10 x 
i64]* @.offload_sizes.10, i32 0, i32 0), i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_maptypes.11, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK19-NEXT: [[TMP151:%.*]] = icmp ne i32 [[TMP150]], 0 +// CHECK19-NEXT: br i1 [[TMP151]], label [[OMP_OFFLOAD_FAILED27:%.*]], label [[OMP_OFFLOAD_CONT28:%.*]] // CHECK19: omp_offload.failed27: // CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l145(i32 [[TMP89]], [10 x float]* [[B]], i32 [[TMP1]], float* [[VLA]], [5 x [10 x double]]* [[C]], i32 5, i32 [[TMP3]], double* [[VLA1]], %struct.TT* [[D]], i32 [[TMP91]]) #[[ATTR3]] // CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT28]] @@ -14212,10 +14006,10 @@ // CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l145(i32 [[TMP89]], [10 x float]* [[B]], i32 [[TMP1]], float* [[VLA]], [5 x [10 x double]]* [[C]], i32 5, i32 [[TMP3]], double* [[VLA1]], %struct.TT* [[D]], i32 [[TMP91]]) #[[ATTR3]] // CHECK19-NEXT: br label [[OMP_IF_END30]] // CHECK19: omp_if.end30: -// CHECK19-NEXT: [[TMP163:%.*]] = load i32, i32* [[A]], align 4 -// CHECK19-NEXT: [[TMP164:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK19-NEXT: call void @llvm.stackrestore(i8* [[TMP164]]) -// CHECK19-NEXT: ret i32 [[TMP163]] +// CHECK19-NEXT: [[TMP152:%.*]] = load i32, i32* [[A]], align 4 +// CHECK19-NEXT: [[TMP153:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK19-NEXT: call void @llvm.stackrestore(i8* [[TMP153]]) +// CHECK19-NEXT: ret i32 [[TMP152]] // // // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l103 @@ -14894,7 +14688,6 @@ // CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 4 // CHECK19-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 4 // CHECK19-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 4 -// CHECK19-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 4 // CHECK19-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK19-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 // CHECK19-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 @@ -14925,57 +14718,47 @@ // CHECK19-NEXT: [[TMP12:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK19-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to double** // CHECK19-NEXT: store double* [[A]], double** [[TMP13]], align 4 -// CHECK19-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK19-NEXT: store i64 8, i64* [[TMP14]], align 4 -// CHECK19-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK19-NEXT: store i8* null, i8** [[TMP15]], align 4 -// CHECK19-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK19-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32* -// CHECK19-NEXT: store i32 [[TMP5]], i32* [[TMP17]], align 4 -// CHECK19-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK19-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32* -// CHECK19-NEXT: store i32 [[TMP5]], i32* [[TMP19]], align 4 -// CHECK19-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK19-NEXT: store i64 4, i64* [[TMP20]], align 4 -// CHECK19-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK19-NEXT: store i8* null, i8** [[TMP21]], align 4 -// 
CHECK19-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK19-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK19-NEXT: store i8* null, i8** [[TMP14]], align 4 +// CHECK19-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK19-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i32* +// CHECK19-NEXT: store i32 [[TMP5]], i32* [[TMP16]], align 4 +// CHECK19-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK19-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32* +// CHECK19-NEXT: store i32 [[TMP5]], i32* [[TMP18]], align 4 +// CHECK19-NEXT: [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK19-NEXT: store i8* null, i8** [[TMP19]], align 4 +// CHECK19-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK19-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32* +// CHECK19-NEXT: store i32 2, i32* [[TMP21]], align 4 +// CHECK19-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK19-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i32* // CHECK19-NEXT: store i32 2, i32* [[TMP23]], align 4 -// CHECK19-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK19-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i32* -// CHECK19-NEXT: store i32 2, i32* [[TMP25]], align 4 -// CHECK19-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK19-NEXT: store i64 4, i64* [[TMP26]], align 4 -// CHECK19-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK19-NEXT: store i8* null, i8** [[TMP27]], align 4 -// CHECK19-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK19-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i32* -// CHECK19-NEXT: store i32 [[TMP1]], i32* [[TMP29]], align 4 -// CHECK19-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK19-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i32* -// CHECK19-NEXT: store i32 [[TMP1]], i32* [[TMP31]], align 4 -// CHECK19-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK19-NEXT: store i64 4, i64* [[TMP32]], align 4 -// CHECK19-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 -// CHECK19-NEXT: store i8* null, i8** [[TMP33]], align 4 -// CHECK19-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK19-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i16** -// CHECK19-NEXT: store i16* [[VLA]], i16** [[TMP35]], align 4 -// CHECK19-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK19-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i16** -// CHECK19-NEXT: store i16* [[VLA]], i16** [[TMP37]], align 4 -// CHECK19-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK19-NEXT: store i64 [[TMP9]], i64* [[TMP38]], align 4 -// CHECK19-NEXT: [[TMP39:%.*]] = getelementptr inbounds [5 x i8*], [5 x 
i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 -// CHECK19-NEXT: store i8* null, i8** [[TMP39]], align 4 -// CHECK19-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK19-NEXT: store i8* null, i8** [[TMP24]], align 4 +// CHECK19-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK19-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i32* +// CHECK19-NEXT: store i32 [[TMP1]], i32* [[TMP26]], align 4 +// CHECK19-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK19-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i32* +// CHECK19-NEXT: store i32 [[TMP1]], i32* [[TMP28]], align 4 +// CHECK19-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 +// CHECK19-NEXT: store i8* null, i8** [[TMP29]], align 4 +// CHECK19-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK19-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i16** +// CHECK19-NEXT: store i16* [[VLA]], i16** [[TMP31]], align 4 +// CHECK19-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK19-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i16** +// CHECK19-NEXT: store i16* [[VLA]], i16** [[TMP33]], align 4 +// CHECK19-NEXT: store i64 [[TMP9]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.13, i32 0, i32 4), align 4 +// CHECK19-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 +// CHECK19-NEXT: store i8* null, i8** [[TMP34]], align 4 +// CHECK19-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK19-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK19-NEXT: [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l218.region_id, i32 5, i8** [[TMP40]], i8** [[TMP41]], i64* [[TMP42]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK19-NEXT: [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0 -// CHECK19-NEXT: br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK19-NEXT: [[TMP37:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l218.region_id, i32 5, i8** [[TMP35]], i8** [[TMP36]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.13, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.14, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK19-NEXT: [[TMP38:%.*]] = icmp ne i32 [[TMP37]], 0 +// CHECK19-NEXT: br i1 [[TMP38]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK19: omp_offload.failed: 
// CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l218(%struct.S1* [[THIS1]], i32 [[TMP5]], i32 2, i32 [[TMP1]], i16* [[VLA]]) #[[ATTR3]] // CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -14985,15 +14768,15 @@ // CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l218(%struct.S1* [[THIS1]], i32 [[TMP5]], i32 2, i32 [[TMP1]], i16* [[VLA]]) #[[ATTR3]] // CHECK19-NEXT: br label [[OMP_IF_END]] // CHECK19: omp_if.end: -// CHECK19-NEXT: [[TMP45:%.*]] = mul nsw i32 1, [[TMP1]] -// CHECK19-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP45]] +// CHECK19-NEXT: [[TMP39:%.*]] = mul nsw i32 1, [[TMP1]] +// CHECK19-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP39]] // CHECK19-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1 -// CHECK19-NEXT: [[TMP46:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2 -// CHECK19-NEXT: [[CONV:%.*]] = sext i16 [[TMP46]] to i32 -// CHECK19-NEXT: [[TMP47:%.*]] = load i32, i32* [[B]], align 4 -// CHECK19-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[TMP47]] -// CHECK19-NEXT: [[TMP48:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK19-NEXT: call void @llvm.stackrestore(i8* [[TMP48]]) +// CHECK19-NEXT: [[TMP40:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2 +// CHECK19-NEXT: [[CONV:%.*]] = sext i16 [[TMP40]] to i32 +// CHECK19-NEXT: [[TMP41:%.*]] = load i32, i32* [[B]], align 4 +// CHECK19-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[TMP41]] +// CHECK19-NEXT: [[TMP42:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK19-NEXT: call void @llvm.stackrestore(i8* [[TMP42]]) // CHECK19-NEXT: ret i32 [[ADD3]] // // @@ -15096,7 +14879,7 @@ // CHECK19-NEXT: [[ADD6:%.*]] = add i32 [[TMP40]], 1 // CHECK19-NEXT: [[TMP41:%.*]] = zext i32 [[ADD6]] to i64 // CHECK19-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP41]]) -// CHECK19-NEXT: [[TMP42:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l200.region_id, i32 5, i8** [[TMP34]], i8** [[TMP35]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.14, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.15, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK19-NEXT: [[TMP42:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l200.region_id, i32 5, i8** [[TMP34]], i8** [[TMP35]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.16, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK19-NEXT: [[TMP43:%.*]] = icmp ne i32 [[TMP42]], 0 // CHECK19-NEXT: br i1 [[TMP43]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK19: omp_offload.failed: @@ -15166,7 +14949,7 @@ // CHECK19-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK19-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK19-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK19-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* 
@.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l183.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.17, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.18, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK19-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l183.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.19, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.20, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK19-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0 // CHECK19-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK19: omp_offload.failed: @@ -15203,11 +14986,11 @@ // CHECK19-NEXT: [[TMP4:%.*]] = load i32, i32* [[B_ADDR]], align 4 // CHECK19-NEXT: store i32 [[TMP4]], i32* [[B_CASTED]], align 4 // CHECK19-NEXT: [[TMP5:%.*]] = load i32, i32* [[B_CASTED]], align 4 -// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], i16* [[TMP3]]) +// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*)* @.omp_outlined..12 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], i16* [[TMP3]]) // CHECK19-NEXT: ret void // // -// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..12 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i32 noundef [[B:%.*]], i32 noundef [[VLA:%.*]], i32 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR2]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -15329,11 +15112,11 @@ // CHECK19-NEXT: [[CONV3:%.*]] = bitcast i32* [[AAA_CASTED]] to i8* // CHECK19-NEXT: store i8 [[TMP7]], i8* [[CONV3]], align 1 // CHECK19-NEXT: [[TMP8:%.*]] = load i32, i32* [[AAA_CASTED]], align 4 -// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, [10 x i32]*)* @.omp_outlined..13 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], i32 [[TMP8]], [10 x i32]* [[TMP0]]) +// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, [10 x i32]*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], i32 [[TMP8]], [10 x i32]* [[TMP0]]) // CHECK19-NEXT: ret void // // -// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..13 +// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..15 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[N:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], i32 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -15475,11 +15258,11 @@ // CHECK19-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16* // CHECK19-NEXT: store i16 [[TMP3]], i16* [[CONV1]], align 2 // CHECK19-NEXT: [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4 -// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]]) +// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]]) // CHECK19-NEXT: ret void // // -// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..16 +// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..18 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -15605,7 +15388,6 @@ // CHECK20-NEXT: [[DOTOFFLOAD_BASEPTRS23:%.*]] = alloca [10 x i8*], align 4 // CHECK20-NEXT: [[DOTOFFLOAD_PTRS24:%.*]] = alloca [10 x i8*], align 4 // CHECK20-NEXT: [[DOTOFFLOAD_MAPPERS25:%.*]] = alloca [10 x i8*], align 4 -// CHECK20-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [10 x i64], align 4 // CHECK20-NEXT: [[_TMP26:%.*]] = alloca i32, align 4 // CHECK20-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]]) // CHECK20-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 @@ -15784,107 +15566,88 @@ // CHECK20-NEXT: [[TMP100:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS24]], i32 0, i32 0 // CHECK20-NEXT: [[TMP101:%.*]] = bitcast i8** [[TMP100]] to i32* // CHECK20-NEXT: store i32 [[TMP89]], i32* [[TMP101]], align 4 -// CHECK20-NEXT: [[TMP102:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK20-NEXT: store i64 4, i64* [[TMP102]], align 4 -// CHECK20-NEXT: [[TMP103:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS25]], i32 0, i32 0 -// CHECK20-NEXT: store i8* null, i8** [[TMP103]], align 4 -// CHECK20-NEXT: [[TMP104:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 1 -// CHECK20-NEXT: [[TMP105:%.*]] = bitcast i8** [[TMP104]] to [10 x float]** -// CHECK20-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP105]], align 4 -// CHECK20-NEXT: 
[[TMP106:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS24]], i32 0, i32 1 -// CHECK20-NEXT: [[TMP107:%.*]] = bitcast i8** [[TMP106]] to [10 x float]** -// CHECK20-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP107]], align 4 -// CHECK20-NEXT: [[TMP108:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK20-NEXT: store i64 40, i64* [[TMP108]], align 4 -// CHECK20-NEXT: [[TMP109:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS25]], i32 0, i32 1 -// CHECK20-NEXT: store i8* null, i8** [[TMP109]], align 4 -// CHECK20-NEXT: [[TMP110:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 2 +// CHECK20-NEXT: [[TMP102:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS25]], i32 0, i32 0 +// CHECK20-NEXT: store i8* null, i8** [[TMP102]], align 4 +// CHECK20-NEXT: [[TMP103:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 1 +// CHECK20-NEXT: [[TMP104:%.*]] = bitcast i8** [[TMP103]] to [10 x float]** +// CHECK20-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP104]], align 4 +// CHECK20-NEXT: [[TMP105:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS24]], i32 0, i32 1 +// CHECK20-NEXT: [[TMP106:%.*]] = bitcast i8** [[TMP105]] to [10 x float]** +// CHECK20-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP106]], align 4 +// CHECK20-NEXT: [[TMP107:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS25]], i32 0, i32 1 +// CHECK20-NEXT: store i8* null, i8** [[TMP107]], align 4 +// CHECK20-NEXT: [[TMP108:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 2 +// CHECK20-NEXT: [[TMP109:%.*]] = bitcast i8** [[TMP108]] to i32* +// CHECK20-NEXT: store i32 [[TMP1]], i32* [[TMP109]], align 4 +// CHECK20-NEXT: [[TMP110:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS24]], i32 0, i32 2 // CHECK20-NEXT: [[TMP111:%.*]] = bitcast i8** [[TMP110]] to i32* // CHECK20-NEXT: store i32 [[TMP1]], i32* [[TMP111]], align 4 -// CHECK20-NEXT: [[TMP112:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS24]], i32 0, i32 2 -// CHECK20-NEXT: [[TMP113:%.*]] = bitcast i8** [[TMP112]] to i32* -// CHECK20-NEXT: store i32 [[TMP1]], i32* [[TMP113]], align 4 -// CHECK20-NEXT: [[TMP114:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK20-NEXT: store i64 4, i64* [[TMP114]], align 4 -// CHECK20-NEXT: [[TMP115:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS25]], i32 0, i32 2 -// CHECK20-NEXT: store i8* null, i8** [[TMP115]], align 4 -// CHECK20-NEXT: [[TMP116:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 3 -// CHECK20-NEXT: [[TMP117:%.*]] = bitcast i8** [[TMP116]] to float** -// CHECK20-NEXT: store float* [[VLA]], float** [[TMP117]], align 4 -// CHECK20-NEXT: [[TMP118:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS24]], i32 0, i32 3 -// CHECK20-NEXT: [[TMP119:%.*]] = bitcast i8** [[TMP118]] to float** -// CHECK20-NEXT: store float* [[VLA]], float** [[TMP119]], align 4 -// CHECK20-NEXT: [[TMP120:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK20-NEXT: store i64 [[TMP94]], i64* [[TMP120]], align 4 -// CHECK20-NEXT: [[TMP121:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS25]], i32 
0, i32 3 -// CHECK20-NEXT: store i8* null, i8** [[TMP121]], align 4 -// CHECK20-NEXT: [[TMP122:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 4 -// CHECK20-NEXT: [[TMP123:%.*]] = bitcast i8** [[TMP122]] to [5 x [10 x double]]** -// CHECK20-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP123]], align 4 -// CHECK20-NEXT: [[TMP124:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS24]], i32 0, i32 4 -// CHECK20-NEXT: [[TMP125:%.*]] = bitcast i8** [[TMP124]] to [5 x [10 x double]]** -// CHECK20-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP125]], align 4 -// CHECK20-NEXT: [[TMP126:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK20-NEXT: store i64 400, i64* [[TMP126]], align 4 -// CHECK20-NEXT: [[TMP127:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS25]], i32 0, i32 4 +// CHECK20-NEXT: [[TMP112:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS25]], i32 0, i32 2 +// CHECK20-NEXT: store i8* null, i8** [[TMP112]], align 4 +// CHECK20-NEXT: [[TMP113:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 3 +// CHECK20-NEXT: [[TMP114:%.*]] = bitcast i8** [[TMP113]] to float** +// CHECK20-NEXT: store float* [[VLA]], float** [[TMP114]], align 4 +// CHECK20-NEXT: [[TMP115:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS24]], i32 0, i32 3 +// CHECK20-NEXT: [[TMP116:%.*]] = bitcast i8** [[TMP115]] to float** +// CHECK20-NEXT: store float* [[VLA]], float** [[TMP116]], align 4 +// CHECK20-NEXT: store i64 [[TMP94]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_sizes.10, i32 0, i32 3), align 4 +// CHECK20-NEXT: [[TMP117:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS25]], i32 0, i32 3 +// CHECK20-NEXT: store i8* null, i8** [[TMP117]], align 4 +// CHECK20-NEXT: [[TMP118:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 4 +// CHECK20-NEXT: [[TMP119:%.*]] = bitcast i8** [[TMP118]] to [5 x [10 x double]]** +// CHECK20-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP119]], align 4 +// CHECK20-NEXT: [[TMP120:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS24]], i32 0, i32 4 +// CHECK20-NEXT: [[TMP121:%.*]] = bitcast i8** [[TMP120]] to [5 x [10 x double]]** +// CHECK20-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP121]], align 4 +// CHECK20-NEXT: [[TMP122:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS25]], i32 0, i32 4 +// CHECK20-NEXT: store i8* null, i8** [[TMP122]], align 4 +// CHECK20-NEXT: [[TMP123:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 5 +// CHECK20-NEXT: [[TMP124:%.*]] = bitcast i8** [[TMP123]] to i32* +// CHECK20-NEXT: store i32 5, i32* [[TMP124]], align 4 +// CHECK20-NEXT: [[TMP125:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS24]], i32 0, i32 5 +// CHECK20-NEXT: [[TMP126:%.*]] = bitcast i8** [[TMP125]] to i32* +// CHECK20-NEXT: store i32 5, i32* [[TMP126]], align 4 +// CHECK20-NEXT: [[TMP127:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS25]], i32 0, i32 5 // CHECK20-NEXT: store i8* null, i8** [[TMP127]], align 4 -// CHECK20-NEXT: [[TMP128:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 5 +// CHECK20-NEXT: 
[[TMP128:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 6 // CHECK20-NEXT: [[TMP129:%.*]] = bitcast i8** [[TMP128]] to i32* -// CHECK20-NEXT: store i32 5, i32* [[TMP129]], align 4 -// CHECK20-NEXT: [[TMP130:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS24]], i32 0, i32 5 +// CHECK20-NEXT: store i32 [[TMP3]], i32* [[TMP129]], align 4 +// CHECK20-NEXT: [[TMP130:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS24]], i32 0, i32 6 // CHECK20-NEXT: [[TMP131:%.*]] = bitcast i8** [[TMP130]] to i32* -// CHECK20-NEXT: store i32 5, i32* [[TMP131]], align 4 -// CHECK20-NEXT: [[TMP132:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5 -// CHECK20-NEXT: store i64 4, i64* [[TMP132]], align 4 -// CHECK20-NEXT: [[TMP133:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS25]], i32 0, i32 5 -// CHECK20-NEXT: store i8* null, i8** [[TMP133]], align 4 -// CHECK20-NEXT: [[TMP134:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 6 -// CHECK20-NEXT: [[TMP135:%.*]] = bitcast i8** [[TMP134]] to i32* -// CHECK20-NEXT: store i32 [[TMP3]], i32* [[TMP135]], align 4 -// CHECK20-NEXT: [[TMP136:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS24]], i32 0, i32 6 -// CHECK20-NEXT: [[TMP137:%.*]] = bitcast i8** [[TMP136]] to i32* -// CHECK20-NEXT: store i32 [[TMP3]], i32* [[TMP137]], align 4 -// CHECK20-NEXT: [[TMP138:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6 -// CHECK20-NEXT: store i64 4, i64* [[TMP138]], align 4 -// CHECK20-NEXT: [[TMP139:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS25]], i32 0, i32 6 -// CHECK20-NEXT: store i8* null, i8** [[TMP139]], align 4 -// CHECK20-NEXT: [[TMP140:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 7 -// CHECK20-NEXT: [[TMP141:%.*]] = bitcast i8** [[TMP140]] to double** -// CHECK20-NEXT: store double* [[VLA1]], double** [[TMP141]], align 4 -// CHECK20-NEXT: [[TMP142:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS24]], i32 0, i32 7 -// CHECK20-NEXT: [[TMP143:%.*]] = bitcast i8** [[TMP142]] to double** -// CHECK20-NEXT: store double* [[VLA1]], double** [[TMP143]], align 4 -// CHECK20-NEXT: [[TMP144:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7 -// CHECK20-NEXT: store i64 [[TMP97]], i64* [[TMP144]], align 4 -// CHECK20-NEXT: [[TMP145:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS25]], i32 0, i32 7 -// CHECK20-NEXT: store i8* null, i8** [[TMP145]], align 4 -// CHECK20-NEXT: [[TMP146:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 8 -// CHECK20-NEXT: [[TMP147:%.*]] = bitcast i8** [[TMP146]] to %struct.TT** -// CHECK20-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP147]], align 4 -// CHECK20-NEXT: [[TMP148:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS24]], i32 0, i32 8 -// CHECK20-NEXT: [[TMP149:%.*]] = bitcast i8** [[TMP148]] to %struct.TT** -// CHECK20-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP149]], align 4 -// CHECK20-NEXT: [[TMP150:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 8 -// CHECK20-NEXT: store i64 12, i64* [[TMP150]], align 4 -// CHECK20-NEXT: [[TMP151:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* 
[[DOTOFFLOAD_MAPPERS25]], i32 0, i32 8 -// CHECK20-NEXT: store i8* null, i8** [[TMP151]], align 4 -// CHECK20-NEXT: [[TMP152:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 9 -// CHECK20-NEXT: [[TMP153:%.*]] = bitcast i8** [[TMP152]] to i32* -// CHECK20-NEXT: store i32 [[TMP91]], i32* [[TMP153]], align 4 -// CHECK20-NEXT: [[TMP154:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS24]], i32 0, i32 9 -// CHECK20-NEXT: [[TMP155:%.*]] = bitcast i8** [[TMP154]] to i32* -// CHECK20-NEXT: store i32 [[TMP91]], i32* [[TMP155]], align 4 -// CHECK20-NEXT: [[TMP156:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 9 -// CHECK20-NEXT: store i64 4, i64* [[TMP156]], align 4 -// CHECK20-NEXT: [[TMP157:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS25]], i32 0, i32 9 -// CHECK20-NEXT: store i8* null, i8** [[TMP157]], align 4 -// CHECK20-NEXT: [[TMP158:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP159:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS24]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP160:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 +// CHECK20-NEXT: store i32 [[TMP3]], i32* [[TMP131]], align 4 +// CHECK20-NEXT: [[TMP132:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS25]], i32 0, i32 6 +// CHECK20-NEXT: store i8* null, i8** [[TMP132]], align 4 +// CHECK20-NEXT: [[TMP133:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 7 +// CHECK20-NEXT: [[TMP134:%.*]] = bitcast i8** [[TMP133]] to double** +// CHECK20-NEXT: store double* [[VLA1]], double** [[TMP134]], align 4 +// CHECK20-NEXT: [[TMP135:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS24]], i32 0, i32 7 +// CHECK20-NEXT: [[TMP136:%.*]] = bitcast i8** [[TMP135]] to double** +// CHECK20-NEXT: store double* [[VLA1]], double** [[TMP136]], align 4 +// CHECK20-NEXT: store i64 [[TMP97]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_sizes.10, i32 0, i32 7), align 4 +// CHECK20-NEXT: [[TMP137:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS25]], i32 0, i32 7 +// CHECK20-NEXT: store i8* null, i8** [[TMP137]], align 4 +// CHECK20-NEXT: [[TMP138:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 8 +// CHECK20-NEXT: [[TMP139:%.*]] = bitcast i8** [[TMP138]] to %struct.TT** +// CHECK20-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP139]], align 4 +// CHECK20-NEXT: [[TMP140:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS24]], i32 0, i32 8 +// CHECK20-NEXT: [[TMP141:%.*]] = bitcast i8** [[TMP140]] to %struct.TT** +// CHECK20-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP141]], align 4 +// CHECK20-NEXT: [[TMP142:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS25]], i32 0, i32 8 +// CHECK20-NEXT: store i8* null, i8** [[TMP142]], align 4 +// CHECK20-NEXT: [[TMP143:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 9 +// CHECK20-NEXT: [[TMP144:%.*]] = bitcast i8** [[TMP143]] to i32* +// CHECK20-NEXT: store i32 [[TMP91]], i32* [[TMP144]], align 4 +// CHECK20-NEXT: [[TMP145:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS24]], i32 0, i32 9 +// CHECK20-NEXT: [[TMP146:%.*]] = bitcast i8** [[TMP145]] to i32* 
+// CHECK20-NEXT: store i32 [[TMP91]], i32* [[TMP146]], align 4 +// CHECK20-NEXT: [[TMP147:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS25]], i32 0, i32 9 +// CHECK20-NEXT: store i8* null, i8** [[TMP147]], align 4 +// CHECK20-NEXT: [[TMP148:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS23]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP149:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS24]], i32 0, i32 0 // CHECK20-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK20-NEXT: [[TMP161:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l145.region_id, i32 10, i8** [[TMP158]], i8** [[TMP159]], i64* [[TMP160]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK20-NEXT: [[TMP162:%.*]] = icmp ne i32 [[TMP161]], 0 -// CHECK20-NEXT: br i1 [[TMP162]], label [[OMP_OFFLOAD_FAILED27:%.*]], label [[OMP_OFFLOAD_CONT28:%.*]] +// CHECK20-NEXT: [[TMP150:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l145.region_id, i32 10, i8** [[TMP148]], i8** [[TMP149]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_sizes.10, i32 0, i32 0), i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_maptypes.11, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK20-NEXT: [[TMP151:%.*]] = icmp ne i32 [[TMP150]], 0 +// CHECK20-NEXT: br i1 [[TMP151]], label [[OMP_OFFLOAD_FAILED27:%.*]], label [[OMP_OFFLOAD_CONT28:%.*]] // CHECK20: omp_offload.failed27: // CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l145(i32 [[TMP89]], [10 x float]* [[B]], i32 [[TMP1]], float* [[VLA]], [5 x [10 x double]]* [[C]], i32 5, i32 [[TMP3]], double* [[VLA1]], %struct.TT* [[D]], i32 [[TMP91]]) #[[ATTR3]] // CHECK20-NEXT: br label [[OMP_OFFLOAD_CONT28]] @@ -15894,10 +15657,10 @@ // CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l145(i32 [[TMP89]], [10 x float]* [[B]], i32 [[TMP1]], float* [[VLA]], [5 x [10 x double]]* [[C]], i32 5, i32 [[TMP3]], double* [[VLA1]], %struct.TT* [[D]], i32 [[TMP91]]) #[[ATTR3]] // CHECK20-NEXT: br label [[OMP_IF_END30]] // CHECK20: omp_if.end30: -// CHECK20-NEXT: [[TMP163:%.*]] = load i32, i32* [[A]], align 4 -// CHECK20-NEXT: [[TMP164:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK20-NEXT: call void @llvm.stackrestore(i8* [[TMP164]]) -// CHECK20-NEXT: ret i32 [[TMP163]] +// CHECK20-NEXT: [[TMP152:%.*]] = load i32, i32* [[A]], align 4 +// CHECK20-NEXT: [[TMP153:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK20-NEXT: call void @llvm.stackrestore(i8* [[TMP153]]) +// CHECK20-NEXT: ret i32 [[TMP152]] // // // CHECK20-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l103 @@ -16576,7 +16339,6 @@ // CHECK20-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 4 // CHECK20-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 4 // CHECK20-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 4 -// CHECK20-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 4 // CHECK20-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK20-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 // CHECK20-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 @@ -16607,57 +16369,47 @@ // CHECK20-NEXT: 
[[TMP12:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK20-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to double** // CHECK20-NEXT: store double* [[A]], double** [[TMP13]], align 4 -// CHECK20-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK20-NEXT: store i64 8, i64* [[TMP14]], align 4 -// CHECK20-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK20-NEXT: store i8* null, i8** [[TMP15]], align 4 -// CHECK20-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK20-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32* -// CHECK20-NEXT: store i32 [[TMP5]], i32* [[TMP17]], align 4 -// CHECK20-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK20-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32* -// CHECK20-NEXT: store i32 [[TMP5]], i32* [[TMP19]], align 4 -// CHECK20-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK20-NEXT: store i64 4, i64* [[TMP20]], align 4 -// CHECK20-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK20-NEXT: store i8* null, i8** [[TMP21]], align 4 -// CHECK20-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK20-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK20-NEXT: store i8* null, i8** [[TMP14]], align 4 +// CHECK20-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK20-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i32* +// CHECK20-NEXT: store i32 [[TMP5]], i32* [[TMP16]], align 4 +// CHECK20-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK20-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32* +// CHECK20-NEXT: store i32 [[TMP5]], i32* [[TMP18]], align 4 +// CHECK20-NEXT: [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK20-NEXT: store i8* null, i8** [[TMP19]], align 4 +// CHECK20-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK20-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32* +// CHECK20-NEXT: store i32 2, i32* [[TMP21]], align 4 +// CHECK20-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK20-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i32* // CHECK20-NEXT: store i32 2, i32* [[TMP23]], align 4 -// CHECK20-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK20-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i32* -// CHECK20-NEXT: store i32 2, i32* [[TMP25]], align 4 -// CHECK20-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK20-NEXT: store i64 4, i64* [[TMP26]], align 4 -// CHECK20-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK20-NEXT: store i8* null, i8** [[TMP27]], align 4 -// CHECK20-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK20-NEXT: [[TMP29:%.*]] = bitcast 
i8** [[TMP28]] to i32* -// CHECK20-NEXT: store i32 [[TMP1]], i32* [[TMP29]], align 4 -// CHECK20-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK20-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i32* -// CHECK20-NEXT: store i32 [[TMP1]], i32* [[TMP31]], align 4 -// CHECK20-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK20-NEXT: store i64 4, i64* [[TMP32]], align 4 -// CHECK20-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 -// CHECK20-NEXT: store i8* null, i8** [[TMP33]], align 4 -// CHECK20-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK20-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i16** -// CHECK20-NEXT: store i16* [[VLA]], i16** [[TMP35]], align 4 -// CHECK20-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK20-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i16** -// CHECK20-NEXT: store i16* [[VLA]], i16** [[TMP37]], align 4 -// CHECK20-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK20-NEXT: store i64 [[TMP9]], i64* [[TMP38]], align 4 -// CHECK20-NEXT: [[TMP39:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 -// CHECK20-NEXT: store i8* null, i8** [[TMP39]], align 4 -// CHECK20-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK20-NEXT: store i8* null, i8** [[TMP24]], align 4 +// CHECK20-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK20-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i32* +// CHECK20-NEXT: store i32 [[TMP1]], i32* [[TMP26]], align 4 +// CHECK20-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK20-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i32* +// CHECK20-NEXT: store i32 [[TMP1]], i32* [[TMP28]], align 4 +// CHECK20-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 +// CHECK20-NEXT: store i8* null, i8** [[TMP29]], align 4 +// CHECK20-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK20-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i16** +// CHECK20-NEXT: store i16* [[VLA]], i16** [[TMP31]], align 4 +// CHECK20-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK20-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i16** +// CHECK20-NEXT: store i16* [[VLA]], i16** [[TMP33]], align 4 +// CHECK20-NEXT: store i64 [[TMP9]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.13, i32 0, i32 4), align 4 +// CHECK20-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 +// CHECK20-NEXT: store i8* null, i8** [[TMP34]], align 4 +// CHECK20-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* 
[[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK20-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK20-NEXT: [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l218.region_id, i32 5, i8** [[TMP40]], i8** [[TMP41]], i64* [[TMP42]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK20-NEXT: [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0 -// CHECK20-NEXT: br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK20-NEXT: [[TMP37:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l218.region_id, i32 5, i8** [[TMP35]], i8** [[TMP36]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.13, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.14, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK20-NEXT: [[TMP38:%.*]] = icmp ne i32 [[TMP37]], 0 +// CHECK20-NEXT: br i1 [[TMP38]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK20: omp_offload.failed: // CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l218(%struct.S1* [[THIS1]], i32 [[TMP5]], i32 2, i32 [[TMP1]], i16* [[VLA]]) #[[ATTR3]] // CHECK20-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -16667,15 +16419,15 @@ // CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l218(%struct.S1* [[THIS1]], i32 [[TMP5]], i32 2, i32 [[TMP1]], i16* [[VLA]]) #[[ATTR3]] // CHECK20-NEXT: br label [[OMP_IF_END]] // CHECK20: omp_if.end: -// CHECK20-NEXT: [[TMP45:%.*]] = mul nsw i32 1, [[TMP1]] -// CHECK20-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP45]] +// CHECK20-NEXT: [[TMP39:%.*]] = mul nsw i32 1, [[TMP1]] +// CHECK20-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP39]] // CHECK20-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1 -// CHECK20-NEXT: [[TMP46:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2 -// CHECK20-NEXT: [[CONV:%.*]] = sext i16 [[TMP46]] to i32 -// CHECK20-NEXT: [[TMP47:%.*]] = load i32, i32* [[B]], align 4 -// CHECK20-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[TMP47]] -// CHECK20-NEXT: [[TMP48:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK20-NEXT: call void @llvm.stackrestore(i8* [[TMP48]]) +// CHECK20-NEXT: [[TMP40:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2 +// CHECK20-NEXT: [[CONV:%.*]] = sext i16 [[TMP40]] to i32 +// CHECK20-NEXT: [[TMP41:%.*]] = load i32, i32* [[B]], align 4 +// CHECK20-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[TMP41]] +// CHECK20-NEXT: [[TMP42:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK20-NEXT: call void @llvm.stackrestore(i8* [[TMP42]]) // CHECK20-NEXT: ret i32 [[ADD3]] // // @@ -16778,7 +16530,7 @@ // CHECK20-NEXT: [[ADD6:%.*]] = add i32 [[TMP40]], 1 // CHECK20-NEXT: [[TMP41:%.*]] = zext i32 [[ADD6]] to i64 // CHECK20-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP41]]) -// CHECK20-NEXT: [[TMP42:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l200.region_id, i32 5, i8** 
[[TMP34]], i8** [[TMP35]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.14, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.15, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK20-NEXT: [[TMP42:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l200.region_id, i32 5, i8** [[TMP34]], i8** [[TMP35]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.16, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK20-NEXT: [[TMP43:%.*]] = icmp ne i32 [[TMP42]], 0 // CHECK20-NEXT: br i1 [[TMP43]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK20: omp_offload.failed: @@ -16848,7 +16600,7 @@ // CHECK20-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK20-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK20-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK20-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l183.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.17, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.18, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK20-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l183.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.19, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.20, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK20-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0 // CHECK20-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK20: omp_offload.failed: @@ -16885,11 +16637,11 @@ // CHECK20-NEXT: [[TMP4:%.*]] = load i32, i32* [[B_ADDR]], align 4 // CHECK20-NEXT: store i32 [[TMP4]], i32* [[B_CASTED]], align 4 // CHECK20-NEXT: [[TMP5:%.*]] = load i32, i32* [[B_CASTED]], align 4 -// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], i16* [[TMP3]]) +// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*)* @.omp_outlined..12 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], i16* [[TMP3]]) // CHECK20-NEXT: ret void // // -// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..12 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i32 noundef [[B:%.*]], i32 noundef [[VLA:%.*]], i32 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR2]] { // CHECK20-NEXT: entry: // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -17011,11 +16763,11 @@ // CHECK20-NEXT: [[CONV3:%.*]] = bitcast i32* [[AAA_CASTED]] to i8* // CHECK20-NEXT: store i8 [[TMP7]], i8* [[CONV3]], align 1 // CHECK20-NEXT: [[TMP8:%.*]] = load i32, i32* [[AAA_CASTED]], align 4 -// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, [10 x i32]*)* @.omp_outlined..13 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], i32 [[TMP8]], [10 x i32]* [[TMP0]]) +// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, [10 x i32]*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], i32 [[TMP8]], [10 x i32]* [[TMP0]]) // CHECK20-NEXT: ret void // // -// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..13 +// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..15 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[N:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], i32 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] { // CHECK20-NEXT: entry: // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -17157,11 +16909,11 @@ // CHECK20-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16* // CHECK20-NEXT: store i16 [[TMP3]], i16* [[CONV1]], align 2 // CHECK20-NEXT: [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4 -// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]]) +// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]]) // CHECK20-NEXT: ret void // // -// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..16 +// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..18 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR2]] { // CHECK20-NEXT: entry: // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 diff --git a/clang/test/OpenMP/target_teams_distribute_collapse_codegen.cpp b/clang/test/OpenMP/target_teams_distribute_collapse_codegen.cpp --- a/clang/test/OpenMP/target_teams_distribute_collapse_codegen.cpp +++ b/clang/test/OpenMP/target_teams_distribute_collapse_codegen.cpp @@ -682,7 +682,6 @@ // CHECK9-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 8 // CHECK9-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 8 // CHECK9-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 8 -// CHECK9-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 8 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK9-NEXT: [[_TMP2:%.*]] = alloca i32, align 4 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 @@ -719,74 +718,64 @@ // CHECK9-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK9-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64* // CHECK9-NEXT: store i64 [[TMP7]], i64* [[TMP15]], align 8 -// CHECK9-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK9-NEXT: store i64 4, i64* [[TMP16]], align 8 -// CHECK9-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK9-NEXT: store i8* null, i8** [[TMP17]], align 8 -// CHECK9-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK9-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i64* -// CHECK9-NEXT: store i64 [[TMP9]], i64* [[TMP19]], align 8 -// CHECK9-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK9-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i64* -// CHECK9-NEXT: store i64 [[TMP9]], i64* [[TMP21]], align 8 -// CHECK9-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK9-NEXT: store i64 4, i64* [[TMP22]], align 8 -// CHECK9-NEXT: [[TMP23:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK9-NEXT: store i8* null, i8** [[TMP23]], align 8 -// CHECK9-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK9-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK9-NEXT: store i8* null, i8** [[TMP16]], align 8 +// CHECK9-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK9-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i64* +// CHECK9-NEXT: store i64 [[TMP9]], i64* [[TMP18]], align 8 +// CHECK9-NEXT: [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK9-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to 
i64* +// CHECK9-NEXT: store i64 [[TMP9]], i64* [[TMP20]], align 8 +// CHECK9-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK9-NEXT: store i8* null, i8** [[TMP21]], align 8 +// CHECK9-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK9-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i64* +// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP23]], align 8 +// CHECK9-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK9-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i64* // CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP25]], align 8 -// CHECK9-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK9-NEXT: [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i64* -// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP27]], align 8 -// CHECK9-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK9-NEXT: store i64 8, i64* [[TMP28]], align 8 -// CHECK9-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK9-NEXT: store i8* null, i8** [[TMP29]], align 8 -// CHECK9-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK9-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i64* -// CHECK9-NEXT: store i64 [[TMP3]], i64* [[TMP31]], align 8 -// CHECK9-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK9-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i64* -// CHECK9-NEXT: store i64 [[TMP3]], i64* [[TMP33]], align 8 -// CHECK9-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK9-NEXT: store i64 8, i64* [[TMP34]], align 8 -// CHECK9-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 -// CHECK9-NEXT: store i8* null, i8** [[TMP35]], align 8 -// CHECK9-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK9-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i32** -// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP37]], align 8 -// CHECK9-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK9-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i32** -// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP39]], align 8 -// CHECK9-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK9-NEXT: store i64 [[TMP11]], i64* [[TMP40]], align 8 -// CHECK9-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 -// CHECK9-NEXT: store i8* null, i8** [[TMP41]], align 8 -// CHECK9-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP43:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP44:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP45:%.*]] = load i32, i32* [[N]], align 4 -// CHECK9-NEXT: store i32 [[TMP45]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK9-NEXT: [[TMP46:%.*]] = load i32, i32* [[M]], align 4 -// CHECK9-NEXT: store i32 [[TMP46]], i32* [[DOTCAPTURE_EXPR_3]], align 4 -// 
CHECK9-NEXT: [[TMP47:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP47]], 0 +// CHECK9-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK9-NEXT: store i8* null, i8** [[TMP26]], align 8 +// CHECK9-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK9-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i64* +// CHECK9-NEXT: store i64 [[TMP3]], i64* [[TMP28]], align 8 +// CHECK9-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK9-NEXT: [[TMP30:%.*]] = bitcast i8** [[TMP29]] to i64* +// CHECK9-NEXT: store i64 [[TMP3]], i64* [[TMP30]], align 8 +// CHECK9-NEXT: [[TMP31:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 +// CHECK9-NEXT: store i8* null, i8** [[TMP31]], align 8 +// CHECK9-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK9-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i32** +// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP33]], align 8 +// CHECK9-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK9-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i32** +// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP35]], align 8 +// CHECK9-NEXT: store i64 [[TMP11]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes, i32 0, i32 4), align 8 +// CHECK9-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 +// CHECK9-NEXT: store i8* null, i8** [[TMP36]], align 8 +// CHECK9-NEXT: [[TMP37:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP39:%.*]] = load i32, i32* [[N]], align 4 +// CHECK9-NEXT: store i32 [[TMP39]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK9-NEXT: [[TMP40:%.*]] = load i32, i32* [[M]], align 4 +// CHECK9-NEXT: store i32 [[TMP40]], i32* [[DOTCAPTURE_EXPR_3]], align 4 +// CHECK9-NEXT: [[TMP41:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP41]], 0 // CHECK9-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK9-NEXT: [[CONV5:%.*]] = sext i32 [[DIV]] to i64 -// CHECK9-NEXT: [[TMP48:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4 -// CHECK9-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP48]], 0 +// CHECK9-NEXT: [[TMP42:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4 +// CHECK9-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP42]], 0 // CHECK9-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1 // CHECK9-NEXT: [[CONV8:%.*]] = sext i32 [[DIV7]] to i64 // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i64 [[CONV5]], [[CONV8]] // CHECK9-NEXT: [[SUB9:%.*]] = sub nsw i64 [[MUL]], 1 // CHECK9-NEXT: store i64 [[SUB9]], i64* [[DOTCAPTURE_EXPR_4]], align 8 -// CHECK9-NEXT: [[TMP49:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_4]], align 8 -// CHECK9-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP49]], 1 +// CHECK9-NEXT: [[TMP43:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_4]], align 8 +// CHECK9-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP43]], 1 // CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[ADD]]) -// CHECK9-NEXT: [[TMP50:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* 
@.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l80.region_id, i32 5, i8** [[TMP42]], i8** [[TMP43]], i64* [[TMP44]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK9-NEXT: [[TMP51:%.*]] = icmp ne i32 [[TMP50]], 0 -// CHECK9-NEXT: br i1 [[TMP51]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK9-NEXT: [[TMP44:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l80.region_id, i32 5, i8** [[TMP37]], i8** [[TMP38]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK9-NEXT: [[TMP45:%.*]] = icmp ne i32 [[TMP44]], 0 +// CHECK9-NEXT: br i1 [[TMP45]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK9: omp_offload.failed: // CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l80(i64 [[TMP7]], i64 [[TMP9]], i64 [[TMP1]], i64 [[TMP3]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -794,10 +783,10 @@ // CHECK9-NEXT: [[TMP52:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 // CHECK9-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10ELi2EEiT_(i32 noundef signext [[TMP52]]) // CHECK9-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK9-NEXT: [[TMP53:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK9-NEXT: call void @llvm.stackrestore(i8* [[TMP53]]) -// CHECK9-NEXT: [[TMP54:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK9-NEXT: ret i32 [[TMP54]] +// CHECK9-NEXT: [[TMP47:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK9-NEXT: call void @llvm.stackrestore(i8* [[TMP47]]) +// CHECK9-NEXT: [[TMP48:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK9-NEXT: ret i32 [[TMP48]] // // // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l80 @@ -1002,7 +991,7 @@ // CHECK9-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK9-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 20) -// CHECK9-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l67.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.2, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK9-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l67.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.2, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.3, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK9-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK9-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK9: omp_offload.failed: @@ -1126,7 +1115,6 @@ // CHECK10-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 8 // 
CHECK10-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 8 // CHECK10-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 8 -// CHECK10-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 8 // CHECK10-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK10-NEXT: [[_TMP2:%.*]] = alloca i32, align 4 // CHECK10-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 @@ -1163,74 +1151,64 @@ // CHECK10-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK10-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64* // CHECK10-NEXT: store i64 [[TMP7]], i64* [[TMP15]], align 8 -// CHECK10-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK10-NEXT: store i64 4, i64* [[TMP16]], align 8 -// CHECK10-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK10-NEXT: store i8* null, i8** [[TMP17]], align 8 -// CHECK10-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK10-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i64* -// CHECK10-NEXT: store i64 [[TMP9]], i64* [[TMP19]], align 8 -// CHECK10-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK10-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i64* -// CHECK10-NEXT: store i64 [[TMP9]], i64* [[TMP21]], align 8 -// CHECK10-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK10-NEXT: store i64 4, i64* [[TMP22]], align 8 -// CHECK10-NEXT: [[TMP23:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK10-NEXT: store i8* null, i8** [[TMP23]], align 8 -// CHECK10-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK10-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK10-NEXT: store i8* null, i8** [[TMP16]], align 8 +// CHECK10-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK10-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i64* +// CHECK10-NEXT: store i64 [[TMP9]], i64* [[TMP18]], align 8 +// CHECK10-NEXT: [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK10-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i64* +// CHECK10-NEXT: store i64 [[TMP9]], i64* [[TMP20]], align 8 +// CHECK10-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK10-NEXT: store i8* null, i8** [[TMP21]], align 8 +// CHECK10-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK10-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i64* +// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP23]], align 8 +// CHECK10-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK10-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i64* // CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP25]], align 8 -// CHECK10-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK10-NEXT: [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i64* -// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP27]], align 8 -// CHECK10-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i64], [5 x 
i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK10-NEXT: store i64 8, i64* [[TMP28]], align 8 -// CHECK10-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK10-NEXT: store i8* null, i8** [[TMP29]], align 8 -// CHECK10-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK10-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i64* -// CHECK10-NEXT: store i64 [[TMP3]], i64* [[TMP31]], align 8 -// CHECK10-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK10-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i64* -// CHECK10-NEXT: store i64 [[TMP3]], i64* [[TMP33]], align 8 -// CHECK10-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK10-NEXT: store i64 8, i64* [[TMP34]], align 8 -// CHECK10-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 -// CHECK10-NEXT: store i8* null, i8** [[TMP35]], align 8 -// CHECK10-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK10-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i32** -// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP37]], align 8 -// CHECK10-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK10-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i32** -// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP39]], align 8 -// CHECK10-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK10-NEXT: store i64 [[TMP11]], i64* [[TMP40]], align 8 -// CHECK10-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 -// CHECK10-NEXT: store i8* null, i8** [[TMP41]], align 8 -// CHECK10-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP43:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP44:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP45:%.*]] = load i32, i32* [[N]], align 4 -// CHECK10-NEXT: store i32 [[TMP45]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK10-NEXT: [[TMP46:%.*]] = load i32, i32* [[M]], align 4 -// CHECK10-NEXT: store i32 [[TMP46]], i32* [[DOTCAPTURE_EXPR_3]], align 4 -// CHECK10-NEXT: [[TMP47:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK10-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP47]], 0 +// CHECK10-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK10-NEXT: store i8* null, i8** [[TMP26]], align 8 +// CHECK10-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK10-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i64* +// CHECK10-NEXT: store i64 [[TMP3]], i64* [[TMP28]], align 8 +// CHECK10-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK10-NEXT: [[TMP30:%.*]] = bitcast i8** [[TMP29]] to i64* +// CHECK10-NEXT: store i64 [[TMP3]], i64* [[TMP30]], align 8 +// CHECK10-NEXT: [[TMP31:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 +// CHECK10-NEXT: store i8* null, i8** [[TMP31]], align 8 +// CHECK10-NEXT: 
[[TMP32:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK10-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i32** +// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP33]], align 8 +// CHECK10-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK10-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i32** +// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP35]], align 8 +// CHECK10-NEXT: store i64 [[TMP11]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes, i32 0, i32 4), align 8 +// CHECK10-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 +// CHECK10-NEXT: store i8* null, i8** [[TMP36]], align 8 +// CHECK10-NEXT: [[TMP37:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP39:%.*]] = load i32, i32* [[N]], align 4 +// CHECK10-NEXT: store i32 [[TMP39]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK10-NEXT: [[TMP40:%.*]] = load i32, i32* [[M]], align 4 +// CHECK10-NEXT: store i32 [[TMP40]], i32* [[DOTCAPTURE_EXPR_3]], align 4 +// CHECK10-NEXT: [[TMP41:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK10-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP41]], 0 // CHECK10-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK10-NEXT: [[CONV5:%.*]] = sext i32 [[DIV]] to i64 -// CHECK10-NEXT: [[TMP48:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4 -// CHECK10-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP48]], 0 +// CHECK10-NEXT: [[TMP42:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4 +// CHECK10-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP42]], 0 // CHECK10-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1 // CHECK10-NEXT: [[CONV8:%.*]] = sext i32 [[DIV7]] to i64 // CHECK10-NEXT: [[MUL:%.*]] = mul nsw i64 [[CONV5]], [[CONV8]] // CHECK10-NEXT: [[SUB9:%.*]] = sub nsw i64 [[MUL]], 1 // CHECK10-NEXT: store i64 [[SUB9]], i64* [[DOTCAPTURE_EXPR_4]], align 8 -// CHECK10-NEXT: [[TMP49:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_4]], align 8 -// CHECK10-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP49]], 1 +// CHECK10-NEXT: [[TMP43:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_4]], align 8 +// CHECK10-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP43]], 1 // CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[ADD]]) -// CHECK10-NEXT: [[TMP50:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l80.region_id, i32 5, i8** [[TMP42]], i8** [[TMP43]], i64* [[TMP44]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK10-NEXT: [[TMP51:%.*]] = icmp ne i32 [[TMP50]], 0 -// CHECK10-NEXT: br i1 [[TMP51]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK10-NEXT: [[TMP44:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l80.region_id, i32 5, i8** [[TMP37]], i8** [[TMP38]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK10-NEXT: [[TMP45:%.*]] = icmp ne i32 [[TMP44]], 0 +// CHECK10-NEXT: br i1 [[TMP45]], label [[OMP_OFFLOAD_FAILED:%.*]], 
label [[OMP_OFFLOAD_CONT:%.*]] // CHECK10: omp_offload.failed: // CHECK10-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l80(i64 [[TMP7]], i64 [[TMP9]], i64 [[TMP1]], i64 [[TMP3]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK10-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -1238,10 +1216,10 @@ // CHECK10-NEXT: [[TMP52:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 // CHECK10-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10ELi2EEiT_(i32 noundef signext [[TMP52]]) // CHECK10-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK10-NEXT: [[TMP53:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK10-NEXT: call void @llvm.stackrestore(i8* [[TMP53]]) -// CHECK10-NEXT: [[TMP54:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK10-NEXT: ret i32 [[TMP54]] +// CHECK10-NEXT: [[TMP47:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK10-NEXT: call void @llvm.stackrestore(i8* [[TMP47]]) +// CHECK10-NEXT: [[TMP48:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK10-NEXT: ret i32 [[TMP48]] // // // CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l80 @@ -1446,7 +1424,7 @@ // CHECK10-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK10-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 20) -// CHECK10-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l67.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.2, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK10-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l67.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.2, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.3, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK10-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK10-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK10: omp_offload.failed: @@ -1570,7 +1548,6 @@ // CHECK11-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 4 // CHECK11-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 4 // CHECK11-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 4 -// CHECK11-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 4 // CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK11-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 @@ -1604,74 +1581,64 @@ // CHECK11-NEXT: [[TMP13:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK11-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i32* // CHECK11-NEXT: store i32 [[TMP5]], i32* [[TMP14]], align 4 -// CHECK11-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK11-NEXT: store i64 4, i64* [[TMP15]], align 4 -// CHECK11-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 
0 -// CHECK11-NEXT: store i8* null, i8** [[TMP16]], align 4 -// CHECK11-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK11-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32* -// CHECK11-NEXT: store i32 [[TMP7]], i32* [[TMP18]], align 4 -// CHECK11-NEXT: [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK11-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i32* -// CHECK11-NEXT: store i32 [[TMP7]], i32* [[TMP20]], align 4 -// CHECK11-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK11-NEXT: store i64 4, i64* [[TMP21]], align 4 -// CHECK11-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK11-NEXT: store i8* null, i8** [[TMP22]], align 4 -// CHECK11-NEXT: [[TMP23:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK11-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK11-NEXT: store i8* null, i8** [[TMP15]], align 4 +// CHECK11-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK11-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32* +// CHECK11-NEXT: store i32 [[TMP7]], i32* [[TMP17]], align 4 +// CHECK11-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK11-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32* +// CHECK11-NEXT: store i32 [[TMP7]], i32* [[TMP19]], align 4 +// CHECK11-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK11-NEXT: store i8* null, i8** [[TMP20]], align 4 +// CHECK11-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK11-NEXT: [[TMP22:%.*]] = bitcast i8** [[TMP21]] to i32* +// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP22]], align 4 +// CHECK11-NEXT: [[TMP23:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK11-NEXT: [[TMP24:%.*]] = bitcast i8** [[TMP23]] to i32* // CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP24]], align 4 -// CHECK11-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK11-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i32* -// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP26]], align 4 -// CHECK11-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK11-NEXT: store i64 4, i64* [[TMP27]], align 4 -// CHECK11-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK11-NEXT: store i8* null, i8** [[TMP28]], align 4 -// CHECK11-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK11-NEXT: [[TMP30:%.*]] = bitcast i8** [[TMP29]] to i32* -// CHECK11-NEXT: store i32 [[TMP1]], i32* [[TMP30]], align 4 -// CHECK11-NEXT: [[TMP31:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK11-NEXT: [[TMP32:%.*]] = bitcast i8** [[TMP31]] to i32* -// CHECK11-NEXT: store i32 [[TMP1]], i32* [[TMP32]], align 4 -// CHECK11-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK11-NEXT: store i64 4, i64* [[TMP33]], align 4 
-// CHECK11-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 -// CHECK11-NEXT: store i8* null, i8** [[TMP34]], align 4 -// CHECK11-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK11-NEXT: [[TMP36:%.*]] = bitcast i8** [[TMP35]] to i32** -// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP36]], align 4 -// CHECK11-NEXT: [[TMP37:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK11-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i32** -// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP38]], align 4 -// CHECK11-NEXT: [[TMP39:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK11-NEXT: store i64 [[TMP10]], i64* [[TMP39]], align 4 -// CHECK11-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 -// CHECK11-NEXT: store i8* null, i8** [[TMP40]], align 4 -// CHECK11-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP43:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP44:%.*]] = load i32, i32* [[N]], align 4 -// CHECK11-NEXT: store i32 [[TMP44]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK11-NEXT: [[TMP45:%.*]] = load i32, i32* [[M]], align 4 -// CHECK11-NEXT: store i32 [[TMP45]], i32* [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK11-NEXT: [[TMP46:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP46]], 0 +// CHECK11-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK11-NEXT: store i8* null, i8** [[TMP25]], align 4 +// CHECK11-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK11-NEXT: [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i32* +// CHECK11-NEXT: store i32 [[TMP1]], i32* [[TMP27]], align 4 +// CHECK11-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK11-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i32* +// CHECK11-NEXT: store i32 [[TMP1]], i32* [[TMP29]], align 4 +// CHECK11-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 +// CHECK11-NEXT: store i8* null, i8** [[TMP30]], align 4 +// CHECK11-NEXT: [[TMP31:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK11-NEXT: [[TMP32:%.*]] = bitcast i8** [[TMP31]] to i32** +// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP32]], align 4 +// CHECK11-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK11-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i32** +// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP34]], align 4 +// CHECK11-NEXT: store i64 [[TMP10]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes, i32 0, i32 4), align 4 +// CHECK11-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 +// CHECK11-NEXT: store i8* null, i8** [[TMP35]], align 4 +// CHECK11-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP37:%.*]] = 
getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP38:%.*]] = load i32, i32* [[N]], align 4 +// CHECK11-NEXT: store i32 [[TMP38]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK11-NEXT: [[TMP39:%.*]] = load i32, i32* [[M]], align 4 +// CHECK11-NEXT: store i32 [[TMP39]], i32* [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK11-NEXT: [[TMP40:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP40]], 0 // CHECK11-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK11-NEXT: [[CONV:%.*]] = sext i32 [[DIV]] to i64 -// CHECK11-NEXT: [[TMP47:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK11-NEXT: [[SUB4:%.*]] = sub nsw i32 [[TMP47]], 0 +// CHECK11-NEXT: [[TMP41:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK11-NEXT: [[SUB4:%.*]] = sub nsw i32 [[TMP41]], 0 // CHECK11-NEXT: [[DIV5:%.*]] = sdiv i32 [[SUB4]], 1 // CHECK11-NEXT: [[CONV6:%.*]] = sext i32 [[DIV5]] to i64 // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i64 [[CONV]], [[CONV6]] // CHECK11-NEXT: [[SUB7:%.*]] = sub nsw i64 [[MUL]], 1 // CHECK11-NEXT: store i64 [[SUB7]], i64* [[DOTCAPTURE_EXPR_3]], align 8 -// CHECK11-NEXT: [[TMP48:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8 -// CHECK11-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP48]], 1 +// CHECK11-NEXT: [[TMP42:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8 +// CHECK11-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP42]], 1 // CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[ADD]]) -// CHECK11-NEXT: [[TMP49:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l80.region_id, i32 5, i8** [[TMP41]], i8** [[TMP42]], i64* [[TMP43]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK11-NEXT: [[TMP50:%.*]] = icmp ne i32 [[TMP49]], 0 -// CHECK11-NEXT: br i1 [[TMP50]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK11-NEXT: [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l80.region_id, i32 5, i8** [[TMP36]], i8** [[TMP37]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK11-NEXT: [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0 +// CHECK11-NEXT: br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK11: omp_offload.failed: // CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l80(i32 [[TMP5]], i32 [[TMP7]], i32 [[TMP0]], i32 [[TMP1]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -1679,10 +1646,10 @@ // CHECK11-NEXT: [[TMP51:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 // CHECK11-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10ELi2EEiT_(i32 noundef [[TMP51]]) // CHECK11-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK11-NEXT: [[TMP52:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK11-NEXT: call void @llvm.stackrestore(i8* [[TMP52]]) -// CHECK11-NEXT: [[TMP53:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK11-NEXT: ret i32 [[TMP53]] +// CHECK11-NEXT: [[TMP46:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK11-NEXT: call void @llvm.stackrestore(i8* [[TMP46]]) +// CHECK11-NEXT: 
[[TMP47:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK11-NEXT: ret i32 [[TMP47]] // // // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l80 @@ -1879,7 +1846,7 @@ // CHECK11-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK11-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 20) -// CHECK11-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l67.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.2, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK11-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l67.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.2, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.3, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK11-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK11-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK11: omp_offload.failed: @@ -2001,7 +1968,6 @@ // CHECK12-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 4 // CHECK12-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 4 // CHECK12-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 4 -// CHECK12-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 4 // CHECK12-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK12-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 // CHECK12-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 @@ -2035,74 +2001,64 @@ // CHECK12-NEXT: [[TMP13:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK12-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i32* // CHECK12-NEXT: store i32 [[TMP5]], i32* [[TMP14]], align 4 -// CHECK12-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK12-NEXT: store i64 4, i64* [[TMP15]], align 4 -// CHECK12-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK12-NEXT: store i8* null, i8** [[TMP16]], align 4 -// CHECK12-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK12-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32* -// CHECK12-NEXT: store i32 [[TMP7]], i32* [[TMP18]], align 4 -// CHECK12-NEXT: [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK12-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i32* -// CHECK12-NEXT: store i32 [[TMP7]], i32* [[TMP20]], align 4 -// CHECK12-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK12-NEXT: store i64 4, i64* [[TMP21]], align 4 -// CHECK12-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK12-NEXT: store i8* null, i8** [[TMP22]], align 4 -// CHECK12-NEXT: [[TMP23:%.*]] = getelementptr 
inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK12-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK12-NEXT: store i8* null, i8** [[TMP15]], align 4 +// CHECK12-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK12-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32* +// CHECK12-NEXT: store i32 [[TMP7]], i32* [[TMP17]], align 4 +// CHECK12-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK12-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32* +// CHECK12-NEXT: store i32 [[TMP7]], i32* [[TMP19]], align 4 +// CHECK12-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK12-NEXT: store i8* null, i8** [[TMP20]], align 4 +// CHECK12-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK12-NEXT: [[TMP22:%.*]] = bitcast i8** [[TMP21]] to i32* +// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP22]], align 4 +// CHECK12-NEXT: [[TMP23:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK12-NEXT: [[TMP24:%.*]] = bitcast i8** [[TMP23]] to i32* // CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP24]], align 4 -// CHECK12-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK12-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i32* -// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP26]], align 4 -// CHECK12-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK12-NEXT: store i64 4, i64* [[TMP27]], align 4 -// CHECK12-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK12-NEXT: store i8* null, i8** [[TMP28]], align 4 -// CHECK12-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK12-NEXT: [[TMP30:%.*]] = bitcast i8** [[TMP29]] to i32* -// CHECK12-NEXT: store i32 [[TMP1]], i32* [[TMP30]], align 4 -// CHECK12-NEXT: [[TMP31:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK12-NEXT: [[TMP32:%.*]] = bitcast i8** [[TMP31]] to i32* -// CHECK12-NEXT: store i32 [[TMP1]], i32* [[TMP32]], align 4 -// CHECK12-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK12-NEXT: store i64 4, i64* [[TMP33]], align 4 -// CHECK12-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 -// CHECK12-NEXT: store i8* null, i8** [[TMP34]], align 4 -// CHECK12-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK12-NEXT: [[TMP36:%.*]] = bitcast i8** [[TMP35]] to i32** -// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP36]], align 4 -// CHECK12-NEXT: [[TMP37:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK12-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i32** -// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP38]], align 4 -// CHECK12-NEXT: [[TMP39:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK12-NEXT: store i64 [[TMP10]], i64* [[TMP39]], align 4 -// CHECK12-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* 
[[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 -// CHECK12-NEXT: store i8* null, i8** [[TMP40]], align 4 -// CHECK12-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP43:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP44:%.*]] = load i32, i32* [[N]], align 4 -// CHECK12-NEXT: store i32 [[TMP44]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK12-NEXT: [[TMP45:%.*]] = load i32, i32* [[M]], align 4 -// CHECK12-NEXT: store i32 [[TMP45]], i32* [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK12-NEXT: [[TMP46:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK12-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP46]], 0 +// CHECK12-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK12-NEXT: store i8* null, i8** [[TMP25]], align 4 +// CHECK12-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK12-NEXT: [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i32* +// CHECK12-NEXT: store i32 [[TMP1]], i32* [[TMP27]], align 4 +// CHECK12-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK12-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i32* +// CHECK12-NEXT: store i32 [[TMP1]], i32* [[TMP29]], align 4 +// CHECK12-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 +// CHECK12-NEXT: store i8* null, i8** [[TMP30]], align 4 +// CHECK12-NEXT: [[TMP31:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK12-NEXT: [[TMP32:%.*]] = bitcast i8** [[TMP31]] to i32** +// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP32]], align 4 +// CHECK12-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK12-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i32** +// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP34]], align 4 +// CHECK12-NEXT: store i64 [[TMP10]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes, i32 0, i32 4), align 4 +// CHECK12-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 +// CHECK12-NEXT: store i8* null, i8** [[TMP35]], align 4 +// CHECK12-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP37:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP38:%.*]] = load i32, i32* [[N]], align 4 +// CHECK12-NEXT: store i32 [[TMP38]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK12-NEXT: [[TMP39:%.*]] = load i32, i32* [[M]], align 4 +// CHECK12-NEXT: store i32 [[TMP39]], i32* [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK12-NEXT: [[TMP40:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK12-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP40]], 0 // CHECK12-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK12-NEXT: [[CONV:%.*]] = sext i32 [[DIV]] to i64 -// CHECK12-NEXT: [[TMP47:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK12-NEXT: [[SUB4:%.*]] = sub nsw i32 [[TMP47]], 0 +// CHECK12-NEXT: [[TMP41:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK12-NEXT: [[SUB4:%.*]] = sub nsw i32 [[TMP41]], 0 // CHECK12-NEXT: [[DIV5:%.*]] = sdiv i32 [[SUB4]], 1 
// CHECK12-NEXT: [[CONV6:%.*]] = sext i32 [[DIV5]] to i64 // CHECK12-NEXT: [[MUL:%.*]] = mul nsw i64 [[CONV]], [[CONV6]] // CHECK12-NEXT: [[SUB7:%.*]] = sub nsw i64 [[MUL]], 1 // CHECK12-NEXT: store i64 [[SUB7]], i64* [[DOTCAPTURE_EXPR_3]], align 8 -// CHECK12-NEXT: [[TMP48:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8 -// CHECK12-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP48]], 1 +// CHECK12-NEXT: [[TMP42:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8 +// CHECK12-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP42]], 1 // CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[ADD]]) -// CHECK12-NEXT: [[TMP49:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l80.region_id, i32 5, i8** [[TMP41]], i8** [[TMP42]], i64* [[TMP43]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK12-NEXT: [[TMP50:%.*]] = icmp ne i32 [[TMP49]], 0 -// CHECK12-NEXT: br i1 [[TMP50]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK12-NEXT: [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l80.region_id, i32 5, i8** [[TMP36]], i8** [[TMP37]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK12-NEXT: [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0 +// CHECK12-NEXT: br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK12: omp_offload.failed: // CHECK12-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l80(i32 [[TMP5]], i32 [[TMP7]], i32 [[TMP0]], i32 [[TMP1]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK12-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -2110,10 +2066,10 @@ // CHECK12-NEXT: [[TMP51:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 // CHECK12-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10ELi2EEiT_(i32 noundef [[TMP51]]) // CHECK12-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK12-NEXT: [[TMP52:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK12-NEXT: call void @llvm.stackrestore(i8* [[TMP52]]) -// CHECK12-NEXT: [[TMP53:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK12-NEXT: ret i32 [[TMP53]] +// CHECK12-NEXT: [[TMP46:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK12-NEXT: call void @llvm.stackrestore(i8* [[TMP46]]) +// CHECK12-NEXT: [[TMP47:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK12-NEXT: ret i32 [[TMP47]] // // // CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l80 @@ -2310,7 +2266,7 @@ // CHECK12-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK12-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 20) -// CHECK12-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l67.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.2, i32 0, 
i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK12-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l67.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.2, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.3, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK12-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK12-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK12: omp_offload.failed: diff --git a/clang/test/OpenMP/target_teams_distribute_dist_schedule_codegen.cpp b/clang/test/OpenMP/target_teams_distribute_dist_schedule_codegen.cpp --- a/clang/test/OpenMP/target_teams_distribute_dist_schedule_codegen.cpp +++ b/clang/test/OpenMP/target_teams_distribute_dist_schedule_codegen.cpp @@ -1517,7 +1517,6 @@ // CHECK9-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK9-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK9-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8 -// CHECK9-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 8 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -1525,20 +1524,18 @@ // CHECK9-NEXT: [[DOTOFFLOAD_BASEPTRS5:%.*]] = alloca [3 x i8*], align 8 // CHECK9-NEXT: [[DOTOFFLOAD_PTRS6:%.*]] = alloca [3 x i8*], align 8 // CHECK9-NEXT: [[DOTOFFLOAD_MAPPERS7:%.*]] = alloca [3 x i8*], align 8 -// CHECK9-NEXT: [[DOTOFFLOAD_SIZES8:%.*]] = alloca [3 x i64], align 8 -// CHECK9-NEXT: [[_TMP9:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[_TMP8:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTCAPTURE_EXPR_11:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTCAPTURE_EXPR_18:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[N_CASTED19:%.*]] = alloca i64, align 8 +// CHECK9-NEXT: [[DOTCAPTURE_EXPR_17:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[N_CASTED18:%.*]] = alloca i64, align 8 // CHECK9-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8 -// CHECK9-NEXT: [[DOTOFFLOAD_BASEPTRS22:%.*]] = alloca [4 x i8*], align 8 -// CHECK9-NEXT: [[DOTOFFLOAD_PTRS23:%.*]] = alloca [4 x i8*], align 8 -// CHECK9-NEXT: [[DOTOFFLOAD_MAPPERS24:%.*]] = alloca [4 x i8*], align 8 -// CHECK9-NEXT: [[DOTOFFLOAD_SIZES25:%.*]] = alloca [4 x i64], align 8 -// CHECK9-NEXT: [[_TMP26:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTCAPTURE_EXPR_27:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTCAPTURE_EXPR_28:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOFFLOAD_BASEPTRS21:%.*]] = alloca [4 x i8*], align 8 +// CHECK9-NEXT: [[DOTOFFLOAD_PTRS22:%.*]] = alloca [4 x i8*], align 8 +// CHECK9-NEXT: [[DOTOFFLOAD_MAPPERS23:%.*]] = alloca [4 x i8*], align 8 +// CHECK9-NEXT: [[_TMP24:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTCAPTURE_EXPR_25:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTCAPTURE_EXPR_26:%.*]] = alloca i32, align 4 // CHECK9-NEXT: store i32 0, i32* [[RETVAL]], align 4 // CHECK9-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 // CHECK9-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 8 @@ -1560,186 +1557,166 @@ // CHECK9-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* 
[[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK9-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i64* // CHECK9-NEXT: store i64 [[TMP4]], i64* [[TMP9]], align 8 -// CHECK9-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK9-NEXT: store i64 4, i64* [[TMP10]], align 8 -// CHECK9-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK9-NEXT: store i8* null, i8** [[TMP11]], align 8 -// CHECK9-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK9-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64* -// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP13]], align 8 -// CHECK9-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK9-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64* -// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP15]], align 8 -// CHECK9-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK9-NEXT: store i64 8, i64* [[TMP16]], align 8 -// CHECK9-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK9-NEXT: store i8* null, i8** [[TMP17]], align 8 -// CHECK9-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK9-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK9-NEXT: store i8* null, i8** [[TMP10]], align 8 +// CHECK9-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK9-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i64* +// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP12]], align 8 +// CHECK9-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK9-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i64* +// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP14]], align 8 +// CHECK9-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK9-NEXT: store i8* null, i8** [[TMP15]], align 8 +// CHECK9-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK9-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** +// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 8 +// CHECK9-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK9-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** // CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 8 -// CHECK9-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK9-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** -// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 8 -// CHECK9-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK9-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 8 -// CHECK9-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK9-NEXT: store i8* null, i8** [[TMP23]], align 8 -// CHECK9-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// 
CHECK9-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4 -// CHECK9-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK9-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 +// CHECK9-NEXT: store i64 [[TMP5]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 2), align 8 +// CHECK9-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK9-NEXT: store i8* null, i8** [[TMP20]], align 8 +// CHECK9-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP23:%.*]] = load i32, i32* [[N]], align 4 +// CHECK9-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK9-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK9-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK9-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK9-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK9-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1 -// CHECK9-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 -// CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[TMP30]]) -// CHECK9-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l94.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK9-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 -// CHECK9-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK9-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], 1 +// CHECK9-NEXT: [[TMP26:%.*]] = zext i32 [[ADD]] to i64 +// CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[TMP26]]) +// CHECK9-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l94.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK9-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK9-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK9: omp_offload.failed: // CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l94(i64 [[TMP4]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK9: omp_offload.cont: -// CHECK9-NEXT: [[TMP33:%.*]] = load i32, i32* [[N]], align 4 +// CHECK9-NEXT: [[TMP29:%.*]] = load i32, i32* [[N]], align 4 // CHECK9-NEXT: [[CONV4:%.*]] = bitcast i64* [[N_CASTED3]] to i32* -// CHECK9-NEXT: store i32 [[TMP33]], i32* [[CONV4]], 
align 4 -// CHECK9-NEXT: [[TMP34:%.*]] = load i64, i64* [[N_CASTED3]], align 8 -// CHECK9-NEXT: [[TMP35:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK9-NEXT: [[TMP36:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i64* -// CHECK9-NEXT: store i64 [[TMP34]], i64* [[TMP37]], align 8 -// CHECK9-NEXT: [[TMP38:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i64* -// CHECK9-NEXT: store i64 [[TMP34]], i64* [[TMP39]], align 8 -// CHECK9-NEXT: [[TMP40:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 0 -// CHECK9-NEXT: store i64 4, i64* [[TMP40]], align 8 -// CHECK9-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 0 +// CHECK9-NEXT: store i32 [[TMP29]], i32* [[CONV4]], align 4 +// CHECK9-NEXT: [[TMP30:%.*]] = load i64, i64* [[N_CASTED3]], align 8 +// CHECK9-NEXT: [[TMP31:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK9-NEXT: [[TMP32:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i64* +// CHECK9-NEXT: store i64 [[TMP30]], i64* [[TMP33]], align 8 +// CHECK9-NEXT: [[TMP34:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i64* +// CHECK9-NEXT: store i64 [[TMP30]], i64* [[TMP35]], align 8 +// CHECK9-NEXT: [[TMP36:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 0 +// CHECK9-NEXT: store i8* null, i8** [[TMP36]], align 8 +// CHECK9-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1 +// CHECK9-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i64* +// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP38]], align 8 +// CHECK9-NEXT: [[TMP39:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 1 +// CHECK9-NEXT: [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i64* +// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP40]], align 8 +// CHECK9-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 1 // CHECK9-NEXT: store i8* null, i8** [[TMP41]], align 8 -// CHECK9-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1 -// CHECK9-NEXT: [[TMP43:%.*]] = bitcast i8** [[TMP42]] to i64* -// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP43]], align 8 -// CHECK9-NEXT: [[TMP44:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 1 -// CHECK9-NEXT: [[TMP45:%.*]] = bitcast i8** [[TMP44]] to i64* -// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP45]], align 8 -// CHECK9-NEXT: [[TMP46:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 1 -// CHECK9-NEXT: store i64 8, i64* [[TMP46]], align 8 -// CHECK9-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 1 -// CHECK9-NEXT: store i8* null, i8** [[TMP47]], align 8 -// CHECK9-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 2 -// CHECK9-NEXT: [[TMP49:%.*]] = bitcast i8** [[TMP48]] to i32** -// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP49]], align 8 -// CHECK9-NEXT: [[TMP50:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* 
[[DOTOFFLOAD_PTRS6]], i32 0, i32 2 -// CHECK9-NEXT: [[TMP51:%.*]] = bitcast i8** [[TMP50]] to i32** -// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP51]], align 8 -// CHECK9-NEXT: [[TMP52:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 2 -// CHECK9-NEXT: store i64 [[TMP35]], i64* [[TMP52]], align 8 -// CHECK9-NEXT: [[TMP53:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 2 -// CHECK9-NEXT: store i8* null, i8** [[TMP53]], align 8 -// CHECK9-NEXT: [[TMP54:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP55:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP56:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP57:%.*]] = load i32, i32* [[N]], align 4 -// CHECK9-NEXT: store i32 [[TMP57]], i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK9-NEXT: [[TMP58:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK9-NEXT: [[SUB12:%.*]] = sub nsw i32 [[TMP58]], 0 -// CHECK9-NEXT: [[DIV13:%.*]] = sdiv i32 [[SUB12]], 1 -// CHECK9-NEXT: [[SUB14:%.*]] = sub nsw i32 [[DIV13]], 1 -// CHECK9-NEXT: store i32 [[SUB14]], i32* [[DOTCAPTURE_EXPR_11]], align 4 -// CHECK9-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_11]], align 4 -// CHECK9-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP59]], 1 -// CHECK9-NEXT: [[TMP60:%.*]] = zext i32 [[ADD15]] to i64 -// CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP60]]) -// CHECK9-NEXT: [[TMP61:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l98.region_id, i32 3, i8** [[TMP54]], i8** [[TMP55]], i64* [[TMP56]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.2, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK9-NEXT: [[TMP62:%.*]] = icmp ne i32 [[TMP61]], 0 -// CHECK9-NEXT: br i1 [[TMP62]], label [[OMP_OFFLOAD_FAILED16:%.*]], label [[OMP_OFFLOAD_CONT17:%.*]] -// CHECK9: omp_offload.failed16: -// CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l98(i64 [[TMP34]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] -// CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT17]] -// CHECK9: omp_offload.cont17: -// CHECK9-NEXT: [[TMP63:%.*]] = load i32, i32* [[N]], align 4 -// CHECK9-NEXT: store i32 [[TMP63]], i32* [[DOTCAPTURE_EXPR_18]], align 4 -// CHECK9-NEXT: [[TMP64:%.*]] = load i32, i32* [[N]], align 4 -// CHECK9-NEXT: [[CONV20:%.*]] = bitcast i64* [[N_CASTED19]] to i32* -// CHECK9-NEXT: store i32 [[TMP64]], i32* [[CONV20]], align 4 -// CHECK9-NEXT: [[TMP65:%.*]] = load i64, i64* [[N_CASTED19]], align 8 -// CHECK9-NEXT: [[TMP66:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_18]], align 4 -// CHECK9-NEXT: [[CONV21:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* -// CHECK9-NEXT: store i32 [[TMP66]], i32* [[CONV21]], align 4 -// CHECK9-NEXT: [[TMP67:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK9-NEXT: [[TMP68:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK9-NEXT: [[TMP69:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP70:%.*]] = bitcast i8** [[TMP69]] to i64* -// CHECK9-NEXT: store i64 [[TMP65]], i64* [[TMP70]], align 8 -// CHECK9-NEXT: [[TMP71:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP72:%.*]] 
= bitcast i8** [[TMP71]] to i64* -// CHECK9-NEXT: store i64 [[TMP65]], i64* [[TMP72]], align 8 -// CHECK9-NEXT: [[TMP73:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 0 -// CHECK9-NEXT: store i64 4, i64* [[TMP73]], align 8 -// CHECK9-NEXT: [[TMP74:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 0 -// CHECK9-NEXT: store i8* null, i8** [[TMP74]], align 8 -// CHECK9-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 1 -// CHECK9-NEXT: [[TMP76:%.*]] = bitcast i8** [[TMP75]] to i64* -// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP76]], align 8 -// CHECK9-NEXT: [[TMP77:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 1 -// CHECK9-NEXT: [[TMP78:%.*]] = bitcast i8** [[TMP77]] to i64* -// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP78]], align 8 -// CHECK9-NEXT: [[TMP79:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 1 -// CHECK9-NEXT: store i64 8, i64* [[TMP79]], align 8 -// CHECK9-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 1 +// CHECK9-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 2 +// CHECK9-NEXT: [[TMP43:%.*]] = bitcast i8** [[TMP42]] to i32** +// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP43]], align 8 +// CHECK9-NEXT: [[TMP44:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 2 +// CHECK9-NEXT: [[TMP45:%.*]] = bitcast i8** [[TMP44]] to i32** +// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP45]], align 8 +// CHECK9-NEXT: store i64 [[TMP31]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.2, i32 0, i32 2), align 8 +// CHECK9-NEXT: [[TMP46:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 2 +// CHECK9-NEXT: store i8* null, i8** [[TMP46]], align 8 +// CHECK9-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP49:%.*]] = load i32, i32* [[N]], align 4 +// CHECK9-NEXT: store i32 [[TMP49]], i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK9-NEXT: [[TMP50:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK9-NEXT: [[SUB11:%.*]] = sub nsw i32 [[TMP50]], 0 +// CHECK9-NEXT: [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1 +// CHECK9-NEXT: [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1 +// CHECK9-NEXT: store i32 [[SUB13]], i32* [[DOTCAPTURE_EXPR_10]], align 4 +// CHECK9-NEXT: [[TMP51:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 +// CHECK9-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP51]], 1 +// CHECK9-NEXT: [[TMP52:%.*]] = zext i32 [[ADD14]] to i64 +// CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP52]]) +// CHECK9-NEXT: [[TMP53:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l98.region_id, i32 3, i8** [[TMP47]], i8** [[TMP48]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.2, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.3, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK9-NEXT: [[TMP54:%.*]] = icmp ne i32 [[TMP53]], 0 +// CHECK9-NEXT: br i1 [[TMP54]], label [[OMP_OFFLOAD_FAILED15:%.*]], label 
[[OMP_OFFLOAD_CONT16:%.*]] +// CHECK9: omp_offload.failed15: +// CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l98(i64 [[TMP30]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] +// CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT16]] +// CHECK9: omp_offload.cont16: +// CHECK9-NEXT: [[TMP55:%.*]] = load i32, i32* [[N]], align 4 +// CHECK9-NEXT: store i32 [[TMP55]], i32* [[DOTCAPTURE_EXPR_17]], align 4 +// CHECK9-NEXT: [[TMP56:%.*]] = load i32, i32* [[N]], align 4 +// CHECK9-NEXT: [[CONV19:%.*]] = bitcast i64* [[N_CASTED18]] to i32* +// CHECK9-NEXT: store i32 [[TMP56]], i32* [[CONV19]], align 4 +// CHECK9-NEXT: [[TMP57:%.*]] = load i64, i64* [[N_CASTED18]], align 8 +// CHECK9-NEXT: [[TMP58:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_17]], align 4 +// CHECK9-NEXT: [[CONV20:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* +// CHECK9-NEXT: store i32 [[TMP58]], i32* [[CONV20]], align 4 +// CHECK9-NEXT: [[TMP59:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 +// CHECK9-NEXT: [[TMP60:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK9-NEXT: [[TMP61:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP62:%.*]] = bitcast i8** [[TMP61]] to i64* +// CHECK9-NEXT: store i64 [[TMP57]], i64* [[TMP62]], align 8 +// CHECK9-NEXT: [[TMP63:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP64:%.*]] = bitcast i8** [[TMP63]] to i64* +// CHECK9-NEXT: store i64 [[TMP57]], i64* [[TMP64]], align 8 +// CHECK9-NEXT: [[TMP65:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 0 +// CHECK9-NEXT: store i8* null, i8** [[TMP65]], align 8 +// CHECK9-NEXT: [[TMP66:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 1 +// CHECK9-NEXT: [[TMP67:%.*]] = bitcast i8** [[TMP66]] to i64* +// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP67]], align 8 +// CHECK9-NEXT: [[TMP68:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 1 +// CHECK9-NEXT: [[TMP69:%.*]] = bitcast i8** [[TMP68]] to i64* +// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP69]], align 8 +// CHECK9-NEXT: [[TMP70:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 1 +// CHECK9-NEXT: store i8* null, i8** [[TMP70]], align 8 +// CHECK9-NEXT: [[TMP71:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 2 +// CHECK9-NEXT: [[TMP72:%.*]] = bitcast i8** [[TMP71]] to i32** +// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP72]], align 8 +// CHECK9-NEXT: [[TMP73:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 2 +// CHECK9-NEXT: [[TMP74:%.*]] = bitcast i8** [[TMP73]] to i32** +// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP74]], align 8 +// CHECK9-NEXT: store i64 [[TMP60]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.5, i32 0, i32 2), align 8 +// CHECK9-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 2 +// CHECK9-NEXT: store i8* null, i8** [[TMP75]], align 8 +// CHECK9-NEXT: [[TMP76:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 3 +// CHECK9-NEXT: [[TMP77:%.*]] = bitcast i8** [[TMP76]] to i64* +// CHECK9-NEXT: store i64 [[TMP59]], i64* [[TMP77]], align 8 +// CHECK9-NEXT: [[TMP78:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 3 +// CHECK9-NEXT: [[TMP79:%.*]] = bitcast 
i8** [[TMP78]] to i64* +// CHECK9-NEXT: store i64 [[TMP59]], i64* [[TMP79]], align 8 +// CHECK9-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 3 // CHECK9-NEXT: store i8* null, i8** [[TMP80]], align 8 -// CHECK9-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 2 -// CHECK9-NEXT: [[TMP82:%.*]] = bitcast i8** [[TMP81]] to i32** -// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP82]], align 8 -// CHECK9-NEXT: [[TMP83:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 2 -// CHECK9-NEXT: [[TMP84:%.*]] = bitcast i8** [[TMP83]] to i32** -// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP84]], align 8 -// CHECK9-NEXT: [[TMP85:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 2 -// CHECK9-NEXT: store i64 [[TMP68]], i64* [[TMP85]], align 8 -// CHECK9-NEXT: [[TMP86:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 2 -// CHECK9-NEXT: store i8* null, i8** [[TMP86]], align 8 -// CHECK9-NEXT: [[TMP87:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 3 -// CHECK9-NEXT: [[TMP88:%.*]] = bitcast i8** [[TMP87]] to i64* -// CHECK9-NEXT: store i64 [[TMP67]], i64* [[TMP88]], align 8 -// CHECK9-NEXT: [[TMP89:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 3 -// CHECK9-NEXT: [[TMP90:%.*]] = bitcast i8** [[TMP89]] to i64* -// CHECK9-NEXT: store i64 [[TMP67]], i64* [[TMP90]], align 8 -// CHECK9-NEXT: [[TMP91:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 3 -// CHECK9-NEXT: store i64 4, i64* [[TMP91]], align 8 -// CHECK9-NEXT: [[TMP92:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 3 -// CHECK9-NEXT: store i8* null, i8** [[TMP92]], align 8 -// CHECK9-NEXT: [[TMP93:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP94:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP95:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP96:%.*]] = load i32, i32* [[N]], align 4 -// CHECK9-NEXT: store i32 [[TMP96]], i32* [[DOTCAPTURE_EXPR_27]], align 4 -// CHECK9-NEXT: [[TMP97:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_27]], align 4 -// CHECK9-NEXT: [[SUB29:%.*]] = sub nsw i32 [[TMP97]], 0 -// CHECK9-NEXT: [[DIV30:%.*]] = sdiv i32 [[SUB29]], 1 -// CHECK9-NEXT: [[SUB31:%.*]] = sub nsw i32 [[DIV30]], 1 -// CHECK9-NEXT: store i32 [[SUB31]], i32* [[DOTCAPTURE_EXPR_28]], align 4 -// CHECK9-NEXT: [[TMP98:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_28]], align 4 -// CHECK9-NEXT: [[ADD32:%.*]] = add nsw i32 [[TMP98]], 1 -// CHECK9-NEXT: [[TMP99:%.*]] = zext i32 [[ADD32]] to i64 -// CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP99]]) -// CHECK9-NEXT: [[TMP100:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.region_id, i32 4, i8** [[TMP93]], i8** [[TMP94]], i64* [[TMP95]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK9-NEXT: [[TMP101:%.*]] = icmp ne i32 [[TMP100]], 0 -// CHECK9-NEXT: br i1 [[TMP101]], label [[OMP_OFFLOAD_FAILED33:%.*]], label 
[[OMP_OFFLOAD_CONT34:%.*]] -// CHECK9: omp_offload.failed33: -// CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102(i64 [[TMP65]], i64 [[TMP1]], i32* [[VLA]], i64 [[TMP67]]) #[[ATTR3]] -// CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT34]] -// CHECK9: omp_offload.cont34: -// CHECK9-NEXT: [[TMP102:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 -// CHECK9-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP102]]) +// CHECK9-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP83:%.*]] = load i32, i32* [[N]], align 4 +// CHECK9-NEXT: store i32 [[TMP83]], i32* [[DOTCAPTURE_EXPR_25]], align 4 +// CHECK9-NEXT: [[TMP84:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_25]], align 4 +// CHECK9-NEXT: [[SUB27:%.*]] = sub nsw i32 [[TMP84]], 0 +// CHECK9-NEXT: [[DIV28:%.*]] = sdiv i32 [[SUB27]], 1 +// CHECK9-NEXT: [[SUB29:%.*]] = sub nsw i32 [[DIV28]], 1 +// CHECK9-NEXT: store i32 [[SUB29]], i32* [[DOTCAPTURE_EXPR_26]], align 4 +// CHECK9-NEXT: [[TMP85:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_26]], align 4 +// CHECK9-NEXT: [[ADD30:%.*]] = add nsw i32 [[TMP85]], 1 +// CHECK9-NEXT: [[TMP86:%.*]] = zext i32 [[ADD30]] to i64 +// CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP86]]) +// CHECK9-NEXT: [[TMP87:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.region_id, i32 4, i8** [[TMP81]], i8** [[TMP82]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.5, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.6, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK9-NEXT: [[TMP88:%.*]] = icmp ne i32 [[TMP87]], 0 +// CHECK9-NEXT: br i1 [[TMP88]], label [[OMP_OFFLOAD_FAILED31:%.*]], label [[OMP_OFFLOAD_CONT32:%.*]] +// CHECK9: omp_offload.failed31: +// CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102(i64 [[TMP57]], i64 [[TMP1]], i32* [[VLA]], i64 [[TMP59]]) #[[ATTR3]] +// CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT32]] +// CHECK9: omp_offload.cont32: +// CHECK9-NEXT: [[TMP89:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 +// CHECK9-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP89]]) // CHECK9-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK9-NEXT: [[TMP103:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK9-NEXT: call void @llvm.stackrestore(i8* [[TMP103]]) -// CHECK9-NEXT: [[TMP104:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK9-NEXT: ret i32 [[TMP104]] +// CHECK9-NEXT: [[TMP90:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK9-NEXT: call void @llvm.stackrestore(i8* [[TMP90]]) +// CHECK9-NEXT: [[TMP91:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK9-NEXT: ret i32 [[TMP91]] // // // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l94 @@ -1999,11 +1976,11 @@ // CHECK9-NEXT: [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK9-NEXT: store i32 [[TMP4]], i32* [[CONV3]], align 4 // CHECK9-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP5]]) +// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP5]]) // CHECK9-NEXT: ret void // // -// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..3 +// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..4 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK9-NEXT: entry: // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -2148,7 +2125,7 @@ // CHECK9-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK9-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK9-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l76.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.6, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK9-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l76.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK9-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK9-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK9: omp_offload.failed: @@ -2166,7 +2143,7 @@ // CHECK9-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0 // CHECK9-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0 // CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK9-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l80.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK9-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l80.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.11, i32 0, i32 0), i64* 
getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK9-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 // CHECK9-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]] // CHECK9: omp_offload.failed5: @@ -2184,7 +2161,7 @@ // CHECK9-NEXT: [[TMP23:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0 // CHECK9-NEXT: [[TMP24:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0 // CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK9-NEXT: [[TMP25:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l84.region_id, i32 1, i8** [[TMP23]], i8** [[TMP24]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.11, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK9-NEXT: [[TMP25:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l84.region_id, i32 1, i8** [[TMP23]], i8** [[TMP24]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.14, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.15, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK9-NEXT: [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0 // CHECK9-NEXT: br i1 [[TMP26]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]] // CHECK9: omp_offload.failed11: @@ -2200,11 +2177,11 @@ // CHECK9-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK9-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 // CHECK9-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 -// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..5 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..7 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK9-NEXT: ret void // // -// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..5 +// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..7 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK9-NEXT: entry: // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -2277,11 +2254,11 @@ // CHECK9-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK9-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 // CHECK9-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 -// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..7 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK9-NEXT: ret void // // -// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..7 +// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..10 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK9-NEXT: entry: // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -2354,11 +2331,11 @@ // CHECK9-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK9-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 // CHECK9-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 -// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..13 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK9-NEXT: ret void // // -// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..10 +// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..13 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK9-NEXT: entry: // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -2462,7 +2439,6 @@ // CHECK10-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK10-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK10-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8 -// CHECK10-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 8 // CHECK10-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK10-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK10-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -2470,20 +2446,18 @@ // CHECK10-NEXT: [[DOTOFFLOAD_BASEPTRS5:%.*]] = alloca [3 x i8*], align 8 // CHECK10-NEXT: [[DOTOFFLOAD_PTRS6:%.*]] = alloca [3 x i8*], align 8 // CHECK10-NEXT: [[DOTOFFLOAD_MAPPERS7:%.*]] = alloca [3 x i8*], align 8 -// CHECK10-NEXT: [[DOTOFFLOAD_SIZES8:%.*]] = alloca [3 x i64], align 8 -// CHECK10-NEXT: [[_TMP9:%.*]] = alloca i32, align 4 +// CHECK10-NEXT: [[_TMP8:%.*]] = alloca i32, align 4 +// CHECK10-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4 // CHECK10-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4 -// CHECK10-NEXT: [[DOTCAPTURE_EXPR_11:%.*]] = alloca i32, align 4 -// CHECK10-NEXT: [[DOTCAPTURE_EXPR_18:%.*]] = alloca i32, align 4 -// CHECK10-NEXT: [[N_CASTED19:%.*]] = alloca i64, align 8 +// CHECK10-NEXT: [[DOTCAPTURE_EXPR_17:%.*]] = alloca i32, align 4 +// CHECK10-NEXT: [[N_CASTED18:%.*]] = alloca i64, align 8 // CHECK10-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8 -// CHECK10-NEXT: [[DOTOFFLOAD_BASEPTRS22:%.*]] = alloca [4 x i8*], align 8 -// CHECK10-NEXT: [[DOTOFFLOAD_PTRS23:%.*]] = alloca [4 x i8*], align 8 -// CHECK10-NEXT: [[DOTOFFLOAD_MAPPERS24:%.*]] = alloca [4 x i8*], align 8 -// CHECK10-NEXT: [[DOTOFFLOAD_SIZES25:%.*]] = alloca [4 x i64], align 8 -// CHECK10-NEXT: 
[[_TMP26:%.*]] = alloca i32, align 4 -// CHECK10-NEXT: [[DOTCAPTURE_EXPR_27:%.*]] = alloca i32, align 4 -// CHECK10-NEXT: [[DOTCAPTURE_EXPR_28:%.*]] = alloca i32, align 4 +// CHECK10-NEXT: [[DOTOFFLOAD_BASEPTRS21:%.*]] = alloca [4 x i8*], align 8 +// CHECK10-NEXT: [[DOTOFFLOAD_PTRS22:%.*]] = alloca [4 x i8*], align 8 +// CHECK10-NEXT: [[DOTOFFLOAD_MAPPERS23:%.*]] = alloca [4 x i8*], align 8 +// CHECK10-NEXT: [[_TMP24:%.*]] = alloca i32, align 4 +// CHECK10-NEXT: [[DOTCAPTURE_EXPR_25:%.*]] = alloca i32, align 4 +// CHECK10-NEXT: [[DOTCAPTURE_EXPR_26:%.*]] = alloca i32, align 4 // CHECK10-NEXT: store i32 0, i32* [[RETVAL]], align 4 // CHECK10-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 // CHECK10-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 8 @@ -2505,186 +2479,166 @@ // CHECK10-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK10-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i64* // CHECK10-NEXT: store i64 [[TMP4]], i64* [[TMP9]], align 8 -// CHECK10-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK10-NEXT: store i64 4, i64* [[TMP10]], align 8 -// CHECK10-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK10-NEXT: store i8* null, i8** [[TMP11]], align 8 -// CHECK10-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK10-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64* -// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP13]], align 8 -// CHECK10-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK10-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64* -// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP15]], align 8 -// CHECK10-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK10-NEXT: store i64 8, i64* [[TMP16]], align 8 -// CHECK10-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK10-NEXT: store i8* null, i8** [[TMP17]], align 8 -// CHECK10-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK10-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK10-NEXT: store i8* null, i8** [[TMP10]], align 8 +// CHECK10-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK10-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i64* +// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP12]], align 8 +// CHECK10-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK10-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i64* +// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP14]], align 8 +// CHECK10-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK10-NEXT: store i8* null, i8** [[TMP15]], align 8 +// CHECK10-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK10-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** +// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 8 +// CHECK10-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK10-NEXT: [[TMP19:%.*]] = bitcast i8** 
[[TMP18]] to i32** // CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 8 -// CHECK10-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK10-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** -// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 8 -// CHECK10-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK10-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 8 -// CHECK10-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK10-NEXT: store i8* null, i8** [[TMP23]], align 8 -// CHECK10-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4 -// CHECK10-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK10-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK10-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 +// CHECK10-NEXT: store i64 [[TMP5]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 2), align 8 +// CHECK10-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK10-NEXT: store i8* null, i8** [[TMP20]], align 8 +// CHECK10-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP23:%.*]] = load i32, i32* [[N]], align 4 +// CHECK10-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK10-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK10-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK10-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK10-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK10-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK10-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK10-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1 -// CHECK10-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 -// CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[TMP30]]) -// CHECK10-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l94.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK10-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 -// CHECK10-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK10-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK10-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], 1 +// CHECK10-NEXT: [[TMP26:%.*]] = zext i32 [[ADD]] to i64 +// CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[TMP26]]) +// CHECK10-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* 
@.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l94.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK10-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK10-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK10: omp_offload.failed: // CHECK10-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l94(i64 [[TMP4]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK10-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK10: omp_offload.cont: -// CHECK10-NEXT: [[TMP33:%.*]] = load i32, i32* [[N]], align 4 +// CHECK10-NEXT: [[TMP29:%.*]] = load i32, i32* [[N]], align 4 // CHECK10-NEXT: [[CONV4:%.*]] = bitcast i64* [[N_CASTED3]] to i32* -// CHECK10-NEXT: store i32 [[TMP33]], i32* [[CONV4]], align 4 -// CHECK10-NEXT: [[TMP34:%.*]] = load i64, i64* [[N_CASTED3]], align 8 -// CHECK10-NEXT: [[TMP35:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK10-NEXT: [[TMP36:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i64* -// CHECK10-NEXT: store i64 [[TMP34]], i64* [[TMP37]], align 8 -// CHECK10-NEXT: [[TMP38:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i64* -// CHECK10-NEXT: store i64 [[TMP34]], i64* [[TMP39]], align 8 -// CHECK10-NEXT: [[TMP40:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 0 -// CHECK10-NEXT: store i64 4, i64* [[TMP40]], align 8 -// CHECK10-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 0 +// CHECK10-NEXT: store i32 [[TMP29]], i32* [[CONV4]], align 4 +// CHECK10-NEXT: [[TMP30:%.*]] = load i64, i64* [[N_CASTED3]], align 8 +// CHECK10-NEXT: [[TMP31:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK10-NEXT: [[TMP32:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i64* +// CHECK10-NEXT: store i64 [[TMP30]], i64* [[TMP33]], align 8 +// CHECK10-NEXT: [[TMP34:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i64* +// CHECK10-NEXT: store i64 [[TMP30]], i64* [[TMP35]], align 8 +// CHECK10-NEXT: [[TMP36:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 0 +// CHECK10-NEXT: store i8* null, i8** [[TMP36]], align 8 +// CHECK10-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1 +// CHECK10-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i64* +// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP38]], align 8 +// CHECK10-NEXT: [[TMP39:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 1 +// CHECK10-NEXT: [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i64* +// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP40]], align 8 +// CHECK10-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 1 // CHECK10-NEXT: store i8* null, i8** [[TMP41]], align 8 -// CHECK10-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1 -// CHECK10-NEXT: [[TMP43:%.*]] = bitcast 
i8** [[TMP42]] to i64* -// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP43]], align 8 -// CHECK10-NEXT: [[TMP44:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 1 -// CHECK10-NEXT: [[TMP45:%.*]] = bitcast i8** [[TMP44]] to i64* -// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP45]], align 8 -// CHECK10-NEXT: [[TMP46:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 1 -// CHECK10-NEXT: store i64 8, i64* [[TMP46]], align 8 -// CHECK10-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 1 -// CHECK10-NEXT: store i8* null, i8** [[TMP47]], align 8 -// CHECK10-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 2 -// CHECK10-NEXT: [[TMP49:%.*]] = bitcast i8** [[TMP48]] to i32** -// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP49]], align 8 -// CHECK10-NEXT: [[TMP50:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 2 -// CHECK10-NEXT: [[TMP51:%.*]] = bitcast i8** [[TMP50]] to i32** -// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP51]], align 8 -// CHECK10-NEXT: [[TMP52:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 2 -// CHECK10-NEXT: store i64 [[TMP35]], i64* [[TMP52]], align 8 -// CHECK10-NEXT: [[TMP53:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 2 -// CHECK10-NEXT: store i8* null, i8** [[TMP53]], align 8 -// CHECK10-NEXT: [[TMP54:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP55:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP56:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP57:%.*]] = load i32, i32* [[N]], align 4 -// CHECK10-NEXT: store i32 [[TMP57]], i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK10-NEXT: [[TMP58:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK10-NEXT: [[SUB12:%.*]] = sub nsw i32 [[TMP58]], 0 -// CHECK10-NEXT: [[DIV13:%.*]] = sdiv i32 [[SUB12]], 1 -// CHECK10-NEXT: [[SUB14:%.*]] = sub nsw i32 [[DIV13]], 1 -// CHECK10-NEXT: store i32 [[SUB14]], i32* [[DOTCAPTURE_EXPR_11]], align 4 -// CHECK10-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_11]], align 4 -// CHECK10-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP59]], 1 -// CHECK10-NEXT: [[TMP60:%.*]] = zext i32 [[ADD15]] to i64 -// CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP60]]) -// CHECK10-NEXT: [[TMP61:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l98.region_id, i32 3, i8** [[TMP54]], i8** [[TMP55]], i64* [[TMP56]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.2, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK10-NEXT: [[TMP62:%.*]] = icmp ne i32 [[TMP61]], 0 -// CHECK10-NEXT: br i1 [[TMP62]], label [[OMP_OFFLOAD_FAILED16:%.*]], label [[OMP_OFFLOAD_CONT17:%.*]] -// CHECK10: omp_offload.failed16: -// CHECK10-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l98(i64 [[TMP34]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] -// CHECK10-NEXT: br label [[OMP_OFFLOAD_CONT17]] -// CHECK10: omp_offload.cont17: -// CHECK10-NEXT: [[TMP63:%.*]] = load i32, i32* [[N]], align 4 -// CHECK10-NEXT: store i32 [[TMP63]], i32* [[DOTCAPTURE_EXPR_18]], align 
4 -// CHECK10-NEXT: [[TMP64:%.*]] = load i32, i32* [[N]], align 4 -// CHECK10-NEXT: [[CONV20:%.*]] = bitcast i64* [[N_CASTED19]] to i32* -// CHECK10-NEXT: store i32 [[TMP64]], i32* [[CONV20]], align 4 -// CHECK10-NEXT: [[TMP65:%.*]] = load i64, i64* [[N_CASTED19]], align 8 -// CHECK10-NEXT: [[TMP66:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_18]], align 4 -// CHECK10-NEXT: [[CONV21:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* -// CHECK10-NEXT: store i32 [[TMP66]], i32* [[CONV21]], align 4 -// CHECK10-NEXT: [[TMP67:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK10-NEXT: [[TMP68:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK10-NEXT: [[TMP69:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP70:%.*]] = bitcast i8** [[TMP69]] to i64* -// CHECK10-NEXT: store i64 [[TMP65]], i64* [[TMP70]], align 8 -// CHECK10-NEXT: [[TMP71:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP72:%.*]] = bitcast i8** [[TMP71]] to i64* -// CHECK10-NEXT: store i64 [[TMP65]], i64* [[TMP72]], align 8 -// CHECK10-NEXT: [[TMP73:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 0 -// CHECK10-NEXT: store i64 4, i64* [[TMP73]], align 8 -// CHECK10-NEXT: [[TMP74:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 0 -// CHECK10-NEXT: store i8* null, i8** [[TMP74]], align 8 -// CHECK10-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 1 -// CHECK10-NEXT: [[TMP76:%.*]] = bitcast i8** [[TMP75]] to i64* -// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP76]], align 8 -// CHECK10-NEXT: [[TMP77:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 1 -// CHECK10-NEXT: [[TMP78:%.*]] = bitcast i8** [[TMP77]] to i64* -// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP78]], align 8 -// CHECK10-NEXT: [[TMP79:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 1 -// CHECK10-NEXT: store i64 8, i64* [[TMP79]], align 8 -// CHECK10-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 1 +// CHECK10-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 2 +// CHECK10-NEXT: [[TMP43:%.*]] = bitcast i8** [[TMP42]] to i32** +// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP43]], align 8 +// CHECK10-NEXT: [[TMP44:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 2 +// CHECK10-NEXT: [[TMP45:%.*]] = bitcast i8** [[TMP44]] to i32** +// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP45]], align 8 +// CHECK10-NEXT: store i64 [[TMP31]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.2, i32 0, i32 2), align 8 +// CHECK10-NEXT: [[TMP46:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 2 +// CHECK10-NEXT: store i8* null, i8** [[TMP46]], align 8 +// CHECK10-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP49:%.*]] = load i32, i32* [[N]], align 4 +// CHECK10-NEXT: store i32 [[TMP49]], i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK10-NEXT: [[TMP50:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK10-NEXT: [[SUB11:%.*]] = sub nsw i32 
[[TMP50]], 0 +// CHECK10-NEXT: [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1 +// CHECK10-NEXT: [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1 +// CHECK10-NEXT: store i32 [[SUB13]], i32* [[DOTCAPTURE_EXPR_10]], align 4 +// CHECK10-NEXT: [[TMP51:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 +// CHECK10-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP51]], 1 +// CHECK10-NEXT: [[TMP52:%.*]] = zext i32 [[ADD14]] to i64 +// CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP52]]) +// CHECK10-NEXT: [[TMP53:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l98.region_id, i32 3, i8** [[TMP47]], i8** [[TMP48]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.2, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.3, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK10-NEXT: [[TMP54:%.*]] = icmp ne i32 [[TMP53]], 0 +// CHECK10-NEXT: br i1 [[TMP54]], label [[OMP_OFFLOAD_FAILED15:%.*]], label [[OMP_OFFLOAD_CONT16:%.*]] +// CHECK10: omp_offload.failed15: +// CHECK10-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l98(i64 [[TMP30]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] +// CHECK10-NEXT: br label [[OMP_OFFLOAD_CONT16]] +// CHECK10: omp_offload.cont16: +// CHECK10-NEXT: [[TMP55:%.*]] = load i32, i32* [[N]], align 4 +// CHECK10-NEXT: store i32 [[TMP55]], i32* [[DOTCAPTURE_EXPR_17]], align 4 +// CHECK10-NEXT: [[TMP56:%.*]] = load i32, i32* [[N]], align 4 +// CHECK10-NEXT: [[CONV19:%.*]] = bitcast i64* [[N_CASTED18]] to i32* +// CHECK10-NEXT: store i32 [[TMP56]], i32* [[CONV19]], align 4 +// CHECK10-NEXT: [[TMP57:%.*]] = load i64, i64* [[N_CASTED18]], align 8 +// CHECK10-NEXT: [[TMP58:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_17]], align 4 +// CHECK10-NEXT: [[CONV20:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* +// CHECK10-NEXT: store i32 [[TMP58]], i32* [[CONV20]], align 4 +// CHECK10-NEXT: [[TMP59:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 +// CHECK10-NEXT: [[TMP60:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK10-NEXT: [[TMP61:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP62:%.*]] = bitcast i8** [[TMP61]] to i64* +// CHECK10-NEXT: store i64 [[TMP57]], i64* [[TMP62]], align 8 +// CHECK10-NEXT: [[TMP63:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP64:%.*]] = bitcast i8** [[TMP63]] to i64* +// CHECK10-NEXT: store i64 [[TMP57]], i64* [[TMP64]], align 8 +// CHECK10-NEXT: [[TMP65:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 0 +// CHECK10-NEXT: store i8* null, i8** [[TMP65]], align 8 +// CHECK10-NEXT: [[TMP66:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 1 +// CHECK10-NEXT: [[TMP67:%.*]] = bitcast i8** [[TMP66]] to i64* +// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP67]], align 8 +// CHECK10-NEXT: [[TMP68:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 1 +// CHECK10-NEXT: [[TMP69:%.*]] = bitcast i8** [[TMP68]] to i64* +// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP69]], align 8 +// CHECK10-NEXT: [[TMP70:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 1 +// CHECK10-NEXT: store i8* null, i8** [[TMP70]], align 8 +// CHECK10-NEXT: [[TMP71:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* 
[[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 2 +// CHECK10-NEXT: [[TMP72:%.*]] = bitcast i8** [[TMP71]] to i32** +// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP72]], align 8 +// CHECK10-NEXT: [[TMP73:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 2 +// CHECK10-NEXT: [[TMP74:%.*]] = bitcast i8** [[TMP73]] to i32** +// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP74]], align 8 +// CHECK10-NEXT: store i64 [[TMP60]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.5, i32 0, i32 2), align 8 +// CHECK10-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 2 +// CHECK10-NEXT: store i8* null, i8** [[TMP75]], align 8 +// CHECK10-NEXT: [[TMP76:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 3 +// CHECK10-NEXT: [[TMP77:%.*]] = bitcast i8** [[TMP76]] to i64* +// CHECK10-NEXT: store i64 [[TMP59]], i64* [[TMP77]], align 8 +// CHECK10-NEXT: [[TMP78:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 3 +// CHECK10-NEXT: [[TMP79:%.*]] = bitcast i8** [[TMP78]] to i64* +// CHECK10-NEXT: store i64 [[TMP59]], i64* [[TMP79]], align 8 +// CHECK10-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 3 // CHECK10-NEXT: store i8* null, i8** [[TMP80]], align 8 -// CHECK10-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 2 -// CHECK10-NEXT: [[TMP82:%.*]] = bitcast i8** [[TMP81]] to i32** -// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP82]], align 8 -// CHECK10-NEXT: [[TMP83:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 2 -// CHECK10-NEXT: [[TMP84:%.*]] = bitcast i8** [[TMP83]] to i32** -// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP84]], align 8 -// CHECK10-NEXT: [[TMP85:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 2 -// CHECK10-NEXT: store i64 [[TMP68]], i64* [[TMP85]], align 8 -// CHECK10-NEXT: [[TMP86:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 2 -// CHECK10-NEXT: store i8* null, i8** [[TMP86]], align 8 -// CHECK10-NEXT: [[TMP87:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 3 -// CHECK10-NEXT: [[TMP88:%.*]] = bitcast i8** [[TMP87]] to i64* -// CHECK10-NEXT: store i64 [[TMP67]], i64* [[TMP88]], align 8 -// CHECK10-NEXT: [[TMP89:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 3 -// CHECK10-NEXT: [[TMP90:%.*]] = bitcast i8** [[TMP89]] to i64* -// CHECK10-NEXT: store i64 [[TMP67]], i64* [[TMP90]], align 8 -// CHECK10-NEXT: [[TMP91:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 3 -// CHECK10-NEXT: store i64 4, i64* [[TMP91]], align 8 -// CHECK10-NEXT: [[TMP92:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 3 -// CHECK10-NEXT: store i8* null, i8** [[TMP92]], align 8 -// CHECK10-NEXT: [[TMP93:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP94:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP95:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP96:%.*]] = load i32, i32* [[N]], align 4 -// CHECK10-NEXT: store i32 [[TMP96]], i32* [[DOTCAPTURE_EXPR_27]], 
align 4 -// CHECK10-NEXT: [[TMP97:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_27]], align 4 -// CHECK10-NEXT: [[SUB29:%.*]] = sub nsw i32 [[TMP97]], 0 -// CHECK10-NEXT: [[DIV30:%.*]] = sdiv i32 [[SUB29]], 1 -// CHECK10-NEXT: [[SUB31:%.*]] = sub nsw i32 [[DIV30]], 1 -// CHECK10-NEXT: store i32 [[SUB31]], i32* [[DOTCAPTURE_EXPR_28]], align 4 -// CHECK10-NEXT: [[TMP98:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_28]], align 4 -// CHECK10-NEXT: [[ADD32:%.*]] = add nsw i32 [[TMP98]], 1 -// CHECK10-NEXT: [[TMP99:%.*]] = zext i32 [[ADD32]] to i64 -// CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP99]]) -// CHECK10-NEXT: [[TMP100:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.region_id, i32 4, i8** [[TMP93]], i8** [[TMP94]], i64* [[TMP95]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK10-NEXT: [[TMP101:%.*]] = icmp ne i32 [[TMP100]], 0 -// CHECK10-NEXT: br i1 [[TMP101]], label [[OMP_OFFLOAD_FAILED33:%.*]], label [[OMP_OFFLOAD_CONT34:%.*]] -// CHECK10: omp_offload.failed33: -// CHECK10-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102(i64 [[TMP65]], i64 [[TMP1]], i32* [[VLA]], i64 [[TMP67]]) #[[ATTR3]] -// CHECK10-NEXT: br label [[OMP_OFFLOAD_CONT34]] -// CHECK10: omp_offload.cont34: -// CHECK10-NEXT: [[TMP102:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 -// CHECK10-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP102]]) +// CHECK10-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP83:%.*]] = load i32, i32* [[N]], align 4 +// CHECK10-NEXT: store i32 [[TMP83]], i32* [[DOTCAPTURE_EXPR_25]], align 4 +// CHECK10-NEXT: [[TMP84:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_25]], align 4 +// CHECK10-NEXT: [[SUB27:%.*]] = sub nsw i32 [[TMP84]], 0 +// CHECK10-NEXT: [[DIV28:%.*]] = sdiv i32 [[SUB27]], 1 +// CHECK10-NEXT: [[SUB29:%.*]] = sub nsw i32 [[DIV28]], 1 +// CHECK10-NEXT: store i32 [[SUB29]], i32* [[DOTCAPTURE_EXPR_26]], align 4 +// CHECK10-NEXT: [[TMP85:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_26]], align 4 +// CHECK10-NEXT: [[ADD30:%.*]] = add nsw i32 [[TMP85]], 1 +// CHECK10-NEXT: [[TMP86:%.*]] = zext i32 [[ADD30]] to i64 +// CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP86]]) +// CHECK10-NEXT: [[TMP87:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.region_id, i32 4, i8** [[TMP81]], i8** [[TMP82]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.5, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.6, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK10-NEXT: [[TMP88:%.*]] = icmp ne i32 [[TMP87]], 0 +// CHECK10-NEXT: br i1 [[TMP88]], label [[OMP_OFFLOAD_FAILED31:%.*]], label [[OMP_OFFLOAD_CONT32:%.*]] +// CHECK10: omp_offload.failed31: +// CHECK10-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102(i64 [[TMP57]], i64 [[TMP1]], i32* [[VLA]], i64 [[TMP59]]) #[[ATTR3]] +// CHECK10-NEXT: br label [[OMP_OFFLOAD_CONT32]] +// CHECK10: omp_offload.cont32: +// CHECK10-NEXT: [[TMP89:%.*]] = load i32, i32* 
[[ARGC_ADDR]], align 4 +// CHECK10-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP89]]) // CHECK10-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK10-NEXT: [[TMP103:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK10-NEXT: call void @llvm.stackrestore(i8* [[TMP103]]) -// CHECK10-NEXT: [[TMP104:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK10-NEXT: ret i32 [[TMP104]] +// CHECK10-NEXT: [[TMP90:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK10-NEXT: call void @llvm.stackrestore(i8* [[TMP90]]) +// CHECK10-NEXT: [[TMP91:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK10-NEXT: ret i32 [[TMP91]] // // // CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l94 @@ -2944,11 +2898,11 @@ // CHECK10-NEXT: [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK10-NEXT: store i32 [[TMP4]], i32* [[CONV3]], align 4 // CHECK10-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP5]]) +// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP5]]) // CHECK10-NEXT: ret void // // -// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..3 +// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..4 // CHECK10-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK10-NEXT: entry: // CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -3093,7 +3047,7 @@ // CHECK10-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK10-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK10-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l76.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.6, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK10-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l76.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK10-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK10-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK10: omp_offload.failed: 
@@ -3111,7 +3065,7 @@ // CHECK10-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0 // CHECK10-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0 // CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK10-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l80.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK10-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l80.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.11, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK10-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 // CHECK10-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]] // CHECK10: omp_offload.failed5: @@ -3129,7 +3083,7 @@ // CHECK10-NEXT: [[TMP23:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0 // CHECK10-NEXT: [[TMP24:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0 // CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK10-NEXT: [[TMP25:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l84.region_id, i32 1, i8** [[TMP23]], i8** [[TMP24]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.11, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK10-NEXT: [[TMP25:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l84.region_id, i32 1, i8** [[TMP23]], i8** [[TMP24]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.14, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.15, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK10-NEXT: [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0 // CHECK10-NEXT: br i1 [[TMP26]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]] // CHECK10: omp_offload.failed11: @@ -3145,11 +3099,11 @@ // CHECK10-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK10-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 // CHECK10-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 -// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..5 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..7 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK10-NEXT: ret void // // -// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..5 +// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..7 // CHECK10-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK10-NEXT: entry: // CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -3222,11 +3176,11 @@ // CHECK10-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK10-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 // CHECK10-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 -// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..7 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK10-NEXT: ret void // // -// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..7 +// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..10 // CHECK10-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK10-NEXT: entry: // CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -3299,11 +3253,11 @@ // CHECK10-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK10-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 // CHECK10-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 -// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..13 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK10-NEXT: ret void // // -// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..10 +// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..13 // CHECK10-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK10-NEXT: entry: // CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -3407,7 +3361,6 @@ // CHECK11-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK11-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK11-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4 -// CHECK11-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 4 // CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -3415,20 +3368,18 @@ // CHECK11-NEXT: [[DOTOFFLOAD_BASEPTRS4:%.*]] = alloca [3 x i8*], align 4 // CHECK11-NEXT: [[DOTOFFLOAD_PTRS5:%.*]] = alloca [3 x i8*], align 4 // CHECK11-NEXT: [[DOTOFFLOAD_MAPPERS6:%.*]] = alloca [3 x i8*], align 4 -// CHECK11-NEXT: [[DOTOFFLOAD_SIZES7:%.*]] = alloca [3 x i64], align 4 -// CHECK11-NEXT: [[_TMP8:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[_TMP7:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTCAPTURE_EXPR_8:%.*]] = alloca i32, align 4 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTCAPTURE_EXPR_17:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[N_CASTED18:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTCAPTURE_EXPR_16:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[N_CASTED17:%.*]] = alloca i32, align 4 // CHECK11-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOFFLOAD_BASEPTRS19:%.*]] = alloca [4 x i8*], align 4 -// CHECK11-NEXT: [[DOTOFFLOAD_PTRS20:%.*]] = alloca [4 x i8*], align 4 -// CHECK11-NEXT: [[DOTOFFLOAD_MAPPERS21:%.*]] = alloca [4 x i8*], align 4 -// CHECK11-NEXT: [[DOTOFFLOAD_SIZES22:%.*]] = alloca [4 x i64], align 4 -// CHECK11-NEXT: [[_TMP23:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTCAPTURE_EXPR_24:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTCAPTURE_EXPR_25:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOFFLOAD_BASEPTRS18:%.*]] = alloca [4 x i8*], align 4 +// CHECK11-NEXT: [[DOTOFFLOAD_PTRS19:%.*]] = alloca [4 x i8*], align 4 +// CHECK11-NEXT: [[DOTOFFLOAD_MAPPERS20:%.*]] = alloca [4 x i8*], align 4 +// CHECK11-NEXT: [[_TMP21:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTCAPTURE_EXPR_22:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTCAPTURE_EXPR_23:%.*]] = alloca i32, align 4 // CHECK11-NEXT: store i32 0, i32* [[RETVAL]], align 4 // CHECK11-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 // CHECK11-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 4 @@ -3449,185 +3400,165 @@ // CHECK11-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK11-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i32* // CHECK11-NEXT: store i32 [[TMP3]], i32* [[TMP9]], align 4 -// CHECK11-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK11-NEXT: store i64 4, i64* [[TMP10]], align 4 -// 
CHECK11-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK11-NEXT: store i8* null, i8** [[TMP11]], align 4 -// CHECK11-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK11-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32* -// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP13]], align 4 -// CHECK11-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK11-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32* -// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP15]], align 4 -// CHECK11-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK11-NEXT: store i64 4, i64* [[TMP16]], align 4 -// CHECK11-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK11-NEXT: store i8* null, i8** [[TMP17]], align 4 -// CHECK11-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK11-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK11-NEXT: store i8* null, i8** [[TMP10]], align 4 +// CHECK11-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK11-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i32* +// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP12]], align 4 +// CHECK11-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK11-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i32* +// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP14]], align 4 +// CHECK11-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK11-NEXT: store i8* null, i8** [[TMP15]], align 4 +// CHECK11-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK11-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** +// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 4 +// CHECK11-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK11-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** // CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 4 -// CHECK11-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK11-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** -// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 4 -// CHECK11-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK11-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 4 -// CHECK11-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK11-NEXT: store i8* null, i8** [[TMP23]], align 4 -// CHECK11-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4 -// CHECK11-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK11-NEXT: [[TMP28:%.*]] = load i32, i32* 
[[DOTCAPTURE_EXPR_]], align 4 -// CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 +// CHECK11-NEXT: store i64 [[TMP5]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 2), align 4 +// CHECK11-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK11-NEXT: store i8* null, i8** [[TMP20]], align 4 +// CHECK11-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP23:%.*]] = load i32, i32* [[N]], align 4 +// CHECK11-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK11-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK11-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK11-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK11-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK11-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1 -// CHECK11-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 -// CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[TMP30]]) -// CHECK11-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l94.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK11-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 -// CHECK11-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK11-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], 1 +// CHECK11-NEXT: [[TMP26:%.*]] = zext i32 [[ADD]] to i64 +// CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[TMP26]]) +// CHECK11-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l94.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK11-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK11-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK11: omp_offload.failed: // CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l94(i32 [[TMP3]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK11: omp_offload.cont: -// CHECK11-NEXT: [[TMP33:%.*]] = load i32, i32* [[N]], align 4 -// CHECK11-NEXT: store i32 [[TMP33]], i32* [[N_CASTED3]], align 4 -// CHECK11-NEXT: [[TMP34:%.*]] = load i32, i32* [[N_CASTED3]], align 4 -// CHECK11-NEXT: [[TMP35:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK11-NEXT: [[TMP36:%.*]] = sext i32 [[TMP35]] to i64 -// CHECK11-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i32* -// 
CHECK11-NEXT: store i32 [[TMP34]], i32* [[TMP38]], align 4 -// CHECK11-NEXT: [[TMP39:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i32* -// CHECK11-NEXT: store i32 [[TMP34]], i32* [[TMP40]], align 4 -// CHECK11-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 0 -// CHECK11-NEXT: store i64 4, i64* [[TMP41]], align 4 -// CHECK11-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP29:%.*]] = load i32, i32* [[N]], align 4 +// CHECK11-NEXT: store i32 [[TMP29]], i32* [[N_CASTED3]], align 4 +// CHECK11-NEXT: [[TMP30:%.*]] = load i32, i32* [[N_CASTED3]], align 4 +// CHECK11-NEXT: [[TMP31:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK11-NEXT: [[TMP32:%.*]] = sext i32 [[TMP31]] to i64 +// CHECK11-NEXT: [[TMP33:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i32* +// CHECK11-NEXT: store i32 [[TMP30]], i32* [[TMP34]], align 4 +// CHECK11-NEXT: [[TMP35:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP36:%.*]] = bitcast i8** [[TMP35]] to i32* +// CHECK11-NEXT: store i32 [[TMP30]], i32* [[TMP36]], align 4 +// CHECK11-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0 +// CHECK11-NEXT: store i8* null, i8** [[TMP37]], align 4 +// CHECK11-NEXT: [[TMP38:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1 +// CHECK11-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i32* +// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP39]], align 4 +// CHECK11-NEXT: [[TMP40:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 1 +// CHECK11-NEXT: [[TMP41:%.*]] = bitcast i8** [[TMP40]] to i32* +// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP41]], align 4 +// CHECK11-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1 // CHECK11-NEXT: store i8* null, i8** [[TMP42]], align 4 -// CHECK11-NEXT: [[TMP43:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1 -// CHECK11-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32* -// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP44]], align 4 -// CHECK11-NEXT: [[TMP45:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 1 -// CHECK11-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32* -// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP46]], align 4 -// CHECK11-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 1 -// CHECK11-NEXT: store i64 4, i64* [[TMP47]], align 4 -// CHECK11-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1 -// CHECK11-NEXT: store i8* null, i8** [[TMP48]], align 4 -// CHECK11-NEXT: [[TMP49:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2 -// CHECK11-NEXT: [[TMP50:%.*]] = bitcast i8** [[TMP49]] to i32** -// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP50]], align 4 -// CHECK11-NEXT: [[TMP51:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 2 -// CHECK11-NEXT: [[TMP52:%.*]] = bitcast i8** [[TMP51]] to i32** -// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP52]], align 4 
-// CHECK11-NEXT: [[TMP53:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 2 -// CHECK11-NEXT: store i64 [[TMP36]], i64* [[TMP53]], align 4 -// CHECK11-NEXT: [[TMP54:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 2 -// CHECK11-NEXT: store i8* null, i8** [[TMP54]], align 4 -// CHECK11-NEXT: [[TMP55:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP56:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP57:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP58:%.*]] = load i32, i32* [[N]], align 4 -// CHECK11-NEXT: store i32 [[TMP58]], i32* [[DOTCAPTURE_EXPR_9]], align 4 -// CHECK11-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 -// CHECK11-NEXT: [[SUB11:%.*]] = sub nsw i32 [[TMP59]], 0 -// CHECK11-NEXT: [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1 -// CHECK11-NEXT: [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1 -// CHECK11-NEXT: store i32 [[SUB13]], i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK11-NEXT: [[TMP60:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK11-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP60]], 1 -// CHECK11-NEXT: [[TMP61:%.*]] = zext i32 [[ADD14]] to i64 -// CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP61]]) -// CHECK11-NEXT: [[TMP62:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l98.region_id, i32 3, i8** [[TMP55]], i8** [[TMP56]], i64* [[TMP57]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.2, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK11-NEXT: [[TMP63:%.*]] = icmp ne i32 [[TMP62]], 0 -// CHECK11-NEXT: br i1 [[TMP63]], label [[OMP_OFFLOAD_FAILED15:%.*]], label [[OMP_OFFLOAD_CONT16:%.*]] -// CHECK11: omp_offload.failed15: -// CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l98(i32 [[TMP34]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] -// CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT16]] -// CHECK11: omp_offload.cont16: -// CHECK11-NEXT: [[TMP64:%.*]] = load i32, i32* [[N]], align 4 -// CHECK11-NEXT: store i32 [[TMP64]], i32* [[DOTCAPTURE_EXPR_17]], align 4 -// CHECK11-NEXT: [[TMP65:%.*]] = load i32, i32* [[N]], align 4 -// CHECK11-NEXT: store i32 [[TMP65]], i32* [[N_CASTED18]], align 4 -// CHECK11-NEXT: [[TMP66:%.*]] = load i32, i32* [[N_CASTED18]], align 4 -// CHECK11-NEXT: [[TMP67:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_17]], align 4 -// CHECK11-NEXT: store i32 [[TMP67]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK11-NEXT: [[TMP68:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK11-NEXT: [[TMP69:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK11-NEXT: [[TMP70:%.*]] = sext i32 [[TMP69]] to i64 -// CHECK11-NEXT: [[TMP71:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP72:%.*]] = bitcast i8** [[TMP71]] to i32* -// CHECK11-NEXT: store i32 [[TMP66]], i32* [[TMP72]], align 4 -// CHECK11-NEXT: [[TMP73:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP74:%.*]] = bitcast i8** [[TMP73]] to i32* -// CHECK11-NEXT: store i32 [[TMP66]], i32* [[TMP74]], align 4 -// CHECK11-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* 
[[DOTOFFLOAD_SIZES22]], i32 0, i32 0 -// CHECK11-NEXT: store i64 4, i64* [[TMP75]], align 4 -// CHECK11-NEXT: [[TMP76:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 0 -// CHECK11-NEXT: store i8* null, i8** [[TMP76]], align 4 -// CHECK11-NEXT: [[TMP77:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 1 -// CHECK11-NEXT: [[TMP78:%.*]] = bitcast i8** [[TMP77]] to i32* -// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP78]], align 4 -// CHECK11-NEXT: [[TMP79:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 1 -// CHECK11-NEXT: [[TMP80:%.*]] = bitcast i8** [[TMP79]] to i32* -// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP80]], align 4 -// CHECK11-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 1 -// CHECK11-NEXT: store i64 4, i64* [[TMP81]], align 4 -// CHECK11-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 1 +// CHECK11-NEXT: [[TMP43:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2 +// CHECK11-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32** +// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP44]], align 4 +// CHECK11-NEXT: [[TMP45:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 2 +// CHECK11-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32** +// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP46]], align 4 +// CHECK11-NEXT: store i64 [[TMP32]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.2, i32 0, i32 2), align 4 +// CHECK11-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 2 +// CHECK11-NEXT: store i8* null, i8** [[TMP47]], align 4 +// CHECK11-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP49:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP50:%.*]] = load i32, i32* [[N]], align 4 +// CHECK11-NEXT: store i32 [[TMP50]], i32* [[DOTCAPTURE_EXPR_8]], align 4 +// CHECK11-NEXT: [[TMP51:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_8]], align 4 +// CHECK11-NEXT: [[SUB10:%.*]] = sub nsw i32 [[TMP51]], 0 +// CHECK11-NEXT: [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1 +// CHECK11-NEXT: [[SUB12:%.*]] = sub nsw i32 [[DIV11]], 1 +// CHECK11-NEXT: store i32 [[SUB12]], i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK11-NEXT: [[TMP52:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK11-NEXT: [[ADD13:%.*]] = add nsw i32 [[TMP52]], 1 +// CHECK11-NEXT: [[TMP53:%.*]] = zext i32 [[ADD13]] to i64 +// CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP53]]) +// CHECK11-NEXT: [[TMP54:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l98.region_id, i32 3, i8** [[TMP48]], i8** [[TMP49]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.2, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.3, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK11-NEXT: [[TMP55:%.*]] = icmp ne i32 [[TMP54]], 0 +// CHECK11-NEXT: br i1 [[TMP55]], label [[OMP_OFFLOAD_FAILED14:%.*]], label [[OMP_OFFLOAD_CONT15:%.*]] +// CHECK11: omp_offload.failed14: +// CHECK11-NEXT: call void 
@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l98(i32 [[TMP30]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] +// CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT15]] +// CHECK11: omp_offload.cont15: +// CHECK11-NEXT: [[TMP56:%.*]] = load i32, i32* [[N]], align 4 +// CHECK11-NEXT: store i32 [[TMP56]], i32* [[DOTCAPTURE_EXPR_16]], align 4 +// CHECK11-NEXT: [[TMP57:%.*]] = load i32, i32* [[N]], align 4 +// CHECK11-NEXT: store i32 [[TMP57]], i32* [[N_CASTED17]], align 4 +// CHECK11-NEXT: [[TMP58:%.*]] = load i32, i32* [[N_CASTED17]], align 4 +// CHECK11-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_16]], align 4 +// CHECK11-NEXT: store i32 [[TMP59]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 +// CHECK11-NEXT: [[TMP60:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 +// CHECK11-NEXT: [[TMP61:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK11-NEXT: [[TMP62:%.*]] = sext i32 [[TMP61]] to i64 +// CHECK11-NEXT: [[TMP63:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP64:%.*]] = bitcast i8** [[TMP63]] to i32* +// CHECK11-NEXT: store i32 [[TMP58]], i32* [[TMP64]], align 4 +// CHECK11-NEXT: [[TMP65:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP66:%.*]] = bitcast i8** [[TMP65]] to i32* +// CHECK11-NEXT: store i32 [[TMP58]], i32* [[TMP66]], align 4 +// CHECK11-NEXT: [[TMP67:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 0 +// CHECK11-NEXT: store i8* null, i8** [[TMP67]], align 4 +// CHECK11-NEXT: [[TMP68:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 1 +// CHECK11-NEXT: [[TMP69:%.*]] = bitcast i8** [[TMP68]] to i32* +// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP69]], align 4 +// CHECK11-NEXT: [[TMP70:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 1 +// CHECK11-NEXT: [[TMP71:%.*]] = bitcast i8** [[TMP70]] to i32* +// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP71]], align 4 +// CHECK11-NEXT: [[TMP72:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 1 +// CHECK11-NEXT: store i8* null, i8** [[TMP72]], align 4 +// CHECK11-NEXT: [[TMP73:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 2 +// CHECK11-NEXT: [[TMP74:%.*]] = bitcast i8** [[TMP73]] to i32** +// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP74]], align 4 +// CHECK11-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 2 +// CHECK11-NEXT: [[TMP76:%.*]] = bitcast i8** [[TMP75]] to i32** +// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP76]], align 4 +// CHECK11-NEXT: store i64 [[TMP62]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.5, i32 0, i32 2), align 4 +// CHECK11-NEXT: [[TMP77:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 2 +// CHECK11-NEXT: store i8* null, i8** [[TMP77]], align 4 +// CHECK11-NEXT: [[TMP78:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 3 +// CHECK11-NEXT: [[TMP79:%.*]] = bitcast i8** [[TMP78]] to i32* +// CHECK11-NEXT: store i32 [[TMP60]], i32* [[TMP79]], align 4 +// CHECK11-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 3 +// CHECK11-NEXT: [[TMP81:%.*]] = bitcast i8** [[TMP80]] to i32* +// CHECK11-NEXT: store i32 [[TMP60]], i32* [[TMP81]], align 4 +// CHECK11-NEXT: [[TMP82:%.*]] = 
getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 3 // CHECK11-NEXT: store i8* null, i8** [[TMP82]], align 4 -// CHECK11-NEXT: [[TMP83:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 2 -// CHECK11-NEXT: [[TMP84:%.*]] = bitcast i8** [[TMP83]] to i32** -// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP84]], align 4 -// CHECK11-NEXT: [[TMP85:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 2 -// CHECK11-NEXT: [[TMP86:%.*]] = bitcast i8** [[TMP85]] to i32** -// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP86]], align 4 -// CHECK11-NEXT: [[TMP87:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 2 -// CHECK11-NEXT: store i64 [[TMP70]], i64* [[TMP87]], align 4 -// CHECK11-NEXT: [[TMP88:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 2 -// CHECK11-NEXT: store i8* null, i8** [[TMP88]], align 4 -// CHECK11-NEXT: [[TMP89:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 3 -// CHECK11-NEXT: [[TMP90:%.*]] = bitcast i8** [[TMP89]] to i32* -// CHECK11-NEXT: store i32 [[TMP68]], i32* [[TMP90]], align 4 -// CHECK11-NEXT: [[TMP91:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 3 -// CHECK11-NEXT: [[TMP92:%.*]] = bitcast i8** [[TMP91]] to i32* -// CHECK11-NEXT: store i32 [[TMP68]], i32* [[TMP92]], align 4 -// CHECK11-NEXT: [[TMP93:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 3 -// CHECK11-NEXT: store i64 4, i64* [[TMP93]], align 4 -// CHECK11-NEXT: [[TMP94:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 3 -// CHECK11-NEXT: store i8* null, i8** [[TMP94]], align 4 -// CHECK11-NEXT: [[TMP95:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP96:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP97:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP98:%.*]] = load i32, i32* [[N]], align 4 -// CHECK11-NEXT: store i32 [[TMP98]], i32* [[DOTCAPTURE_EXPR_24]], align 4 -// CHECK11-NEXT: [[TMP99:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_24]], align 4 -// CHECK11-NEXT: [[SUB26:%.*]] = sub nsw i32 [[TMP99]], 0 -// CHECK11-NEXT: [[DIV27:%.*]] = sdiv i32 [[SUB26]], 1 -// CHECK11-NEXT: [[SUB28:%.*]] = sub nsw i32 [[DIV27]], 1 -// CHECK11-NEXT: store i32 [[SUB28]], i32* [[DOTCAPTURE_EXPR_25]], align 4 -// CHECK11-NEXT: [[TMP100:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_25]], align 4 -// CHECK11-NEXT: [[ADD29:%.*]] = add nsw i32 [[TMP100]], 1 -// CHECK11-NEXT: [[TMP101:%.*]] = zext i32 [[ADD29]] to i64 -// CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP101]]) -// CHECK11-NEXT: [[TMP102:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.region_id, i32 4, i8** [[TMP95]], i8** [[TMP96]], i64* [[TMP97]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK11-NEXT: [[TMP103:%.*]] = icmp ne i32 [[TMP102]], 0 -// CHECK11-NEXT: br i1 [[TMP103]], label [[OMP_OFFLOAD_FAILED30:%.*]], label [[OMP_OFFLOAD_CONT31:%.*]] -// CHECK11: omp_offload.failed30: -// CHECK11-NEXT: call void 
@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102(i32 [[TMP66]], i32 [[TMP0]], i32* [[VLA]], i32 [[TMP68]]) #[[ATTR3]] -// CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT31]] -// CHECK11: omp_offload.cont31: -// CHECK11-NEXT: [[TMP104:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 -// CHECK11-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP104]]) +// CHECK11-NEXT: [[TMP83:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP84:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP85:%.*]] = load i32, i32* [[N]], align 4 +// CHECK11-NEXT: store i32 [[TMP85]], i32* [[DOTCAPTURE_EXPR_22]], align 4 +// CHECK11-NEXT: [[TMP86:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_22]], align 4 +// CHECK11-NEXT: [[SUB24:%.*]] = sub nsw i32 [[TMP86]], 0 +// CHECK11-NEXT: [[DIV25:%.*]] = sdiv i32 [[SUB24]], 1 +// CHECK11-NEXT: [[SUB26:%.*]] = sub nsw i32 [[DIV25]], 1 +// CHECK11-NEXT: store i32 [[SUB26]], i32* [[DOTCAPTURE_EXPR_23]], align 4 +// CHECK11-NEXT: [[TMP87:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_23]], align 4 +// CHECK11-NEXT: [[ADD27:%.*]] = add nsw i32 [[TMP87]], 1 +// CHECK11-NEXT: [[TMP88:%.*]] = zext i32 [[ADD27]] to i64 +// CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP88]]) +// CHECK11-NEXT: [[TMP89:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.region_id, i32 4, i8** [[TMP83]], i8** [[TMP84]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.5, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.6, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK11-NEXT: [[TMP90:%.*]] = icmp ne i32 [[TMP89]], 0 +// CHECK11-NEXT: br i1 [[TMP90]], label [[OMP_OFFLOAD_FAILED28:%.*]], label [[OMP_OFFLOAD_CONT29:%.*]] +// CHECK11: omp_offload.failed28: +// CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102(i32 [[TMP58]], i32 [[TMP0]], i32* [[VLA]], i32 [[TMP60]]) #[[ATTR3]] +// CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT29]] +// CHECK11: omp_offload.cont29: +// CHECK11-NEXT: [[TMP91:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 +// CHECK11-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP91]]) // CHECK11-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK11-NEXT: [[TMP105:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK11-NEXT: call void @llvm.stackrestore(i8* [[TMP105]]) -// CHECK11-NEXT: [[TMP106:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK11-NEXT: ret i32 [[TMP106]] +// CHECK11-NEXT: [[TMP92:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK11-NEXT: call void @llvm.stackrestore(i8* [[TMP92]]) +// CHECK11-NEXT: [[TMP93:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK11-NEXT: ret i32 [[TMP93]] // // // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l94 @@ -3875,11 +3806,11 @@ // CHECK11-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK11-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK11-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP5]]) +// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP5]]) // CHECK11-NEXT: ret void // // -// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..3 +// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..4 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK11-NEXT: entry: // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -4021,7 +3952,7 @@ // CHECK11-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK11-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK11-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l76.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.6, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK11-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l76.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK11-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK11-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK11: omp_offload.failed: @@ -4039,7 +3970,7 @@ // CHECK11-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0 // CHECK11-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0 // CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK11-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l80.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK11-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l80.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.11, i32 0, 
i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK11-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 // CHECK11-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]] // CHECK11: omp_offload.failed5: @@ -4057,7 +3988,7 @@ // CHECK11-NEXT: [[TMP23:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0 // CHECK11-NEXT: [[TMP24:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0 // CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK11-NEXT: [[TMP25:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l84.region_id, i32 1, i8** [[TMP23]], i8** [[TMP24]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.11, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK11-NEXT: [[TMP25:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l84.region_id, i32 1, i8** [[TMP23]], i8** [[TMP24]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.14, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.15, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK11-NEXT: [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0 // CHECK11-NEXT: br i1 [[TMP26]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]] // CHECK11: omp_offload.failed11: @@ -4073,11 +4004,11 @@ // CHECK11-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK11-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 // CHECK11-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 -// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..5 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..7 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK11-NEXT: ret void // // -// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..5 +// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..7 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK11-NEXT: entry: // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -4149,11 +4080,11 @@ // CHECK11-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK11-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 // CHECK11-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 -// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..7 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK11-NEXT: ret void // // -// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..7 +// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..10 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK11-NEXT: entry: // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -4225,11 +4156,11 @@ // CHECK11-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK11-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 // CHECK11-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 -// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..13 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK11-NEXT: ret void // // -// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..10 +// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..13 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK11-NEXT: entry: // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -4332,7 +4263,6 @@ // CHECK12-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK12-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK12-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4 -// CHECK12-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 4 // CHECK12-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK12-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK12-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -4340,20 +4270,18 @@ // CHECK12-NEXT: [[DOTOFFLOAD_BASEPTRS4:%.*]] = alloca [3 x i8*], align 4 // CHECK12-NEXT: [[DOTOFFLOAD_PTRS5:%.*]] = alloca [3 x i8*], align 4 // CHECK12-NEXT: [[DOTOFFLOAD_MAPPERS6:%.*]] = alloca [3 x i8*], align 4 -// CHECK12-NEXT: [[DOTOFFLOAD_SIZES7:%.*]] = alloca [3 x i64], align 4 -// CHECK12-NEXT: [[_TMP8:%.*]] = alloca i32, align 4 +// CHECK12-NEXT: [[_TMP7:%.*]] = alloca i32, align 4 +// CHECK12-NEXT: [[DOTCAPTURE_EXPR_8:%.*]] = alloca i32, align 4 // CHECK12-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4 -// CHECK12-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4 -// CHECK12-NEXT: [[DOTCAPTURE_EXPR_17:%.*]] = alloca i32, align 4 -// CHECK12-NEXT: [[N_CASTED18:%.*]] = alloca i32, align 4 +// CHECK12-NEXT: [[DOTCAPTURE_EXPR_16:%.*]] = alloca i32, align 4 +// CHECK12-NEXT: [[N_CASTED17:%.*]] = alloca i32, align 4 // CHECK12-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4 -// CHECK12-NEXT: 
[[DOTOFFLOAD_BASEPTRS19:%.*]] = alloca [4 x i8*], align 4 -// CHECK12-NEXT: [[DOTOFFLOAD_PTRS20:%.*]] = alloca [4 x i8*], align 4 -// CHECK12-NEXT: [[DOTOFFLOAD_MAPPERS21:%.*]] = alloca [4 x i8*], align 4 -// CHECK12-NEXT: [[DOTOFFLOAD_SIZES22:%.*]] = alloca [4 x i64], align 4 -// CHECK12-NEXT: [[_TMP23:%.*]] = alloca i32, align 4 -// CHECK12-NEXT: [[DOTCAPTURE_EXPR_24:%.*]] = alloca i32, align 4 -// CHECK12-NEXT: [[DOTCAPTURE_EXPR_25:%.*]] = alloca i32, align 4 +// CHECK12-NEXT: [[DOTOFFLOAD_BASEPTRS18:%.*]] = alloca [4 x i8*], align 4 +// CHECK12-NEXT: [[DOTOFFLOAD_PTRS19:%.*]] = alloca [4 x i8*], align 4 +// CHECK12-NEXT: [[DOTOFFLOAD_MAPPERS20:%.*]] = alloca [4 x i8*], align 4 +// CHECK12-NEXT: [[_TMP21:%.*]] = alloca i32, align 4 +// CHECK12-NEXT: [[DOTCAPTURE_EXPR_22:%.*]] = alloca i32, align 4 +// CHECK12-NEXT: [[DOTCAPTURE_EXPR_23:%.*]] = alloca i32, align 4 // CHECK12-NEXT: store i32 0, i32* [[RETVAL]], align 4 // CHECK12-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 // CHECK12-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 4 @@ -4374,185 +4302,165 @@ // CHECK12-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK12-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i32* // CHECK12-NEXT: store i32 [[TMP3]], i32* [[TMP9]], align 4 -// CHECK12-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK12-NEXT: store i64 4, i64* [[TMP10]], align 4 -// CHECK12-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK12-NEXT: store i8* null, i8** [[TMP11]], align 4 -// CHECK12-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK12-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32* -// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP13]], align 4 -// CHECK12-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK12-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32* -// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP15]], align 4 -// CHECK12-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK12-NEXT: store i64 4, i64* [[TMP16]], align 4 -// CHECK12-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK12-NEXT: store i8* null, i8** [[TMP17]], align 4 -// CHECK12-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK12-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK12-NEXT: store i8* null, i8** [[TMP10]], align 4 +// CHECK12-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK12-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i32* +// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP12]], align 4 +// CHECK12-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK12-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i32* +// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP14]], align 4 +// CHECK12-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK12-NEXT: store i8* null, i8** [[TMP15]], align 4 +// CHECK12-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, 
i32 2 +// CHECK12-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** +// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 4 +// CHECK12-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK12-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** // CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 4 -// CHECK12-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK12-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** -// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 4 -// CHECK12-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK12-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 4 -// CHECK12-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK12-NEXT: store i8* null, i8** [[TMP23]], align 4 -// CHECK12-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4 -// CHECK12-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK12-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK12-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 +// CHECK12-NEXT: store i64 [[TMP5]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 2), align 4 +// CHECK12-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK12-NEXT: store i8* null, i8** [[TMP20]], align 4 +// CHECK12-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP23:%.*]] = load i32, i32* [[N]], align 4 +// CHECK12-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK12-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK12-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK12-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK12-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK12-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK12-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK12-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1 -// CHECK12-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 -// CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[TMP30]]) -// CHECK12-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l94.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK12-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 -// CHECK12-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK12-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK12-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], 1 +// CHECK12-NEXT: 
[[TMP26:%.*]] = zext i32 [[ADD]] to i64 +// CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[TMP26]]) +// CHECK12-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l94.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK12-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK12-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK12: omp_offload.failed: // CHECK12-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l94(i32 [[TMP3]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK12-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK12: omp_offload.cont: -// CHECK12-NEXT: [[TMP33:%.*]] = load i32, i32* [[N]], align 4 -// CHECK12-NEXT: store i32 [[TMP33]], i32* [[N_CASTED3]], align 4 -// CHECK12-NEXT: [[TMP34:%.*]] = load i32, i32* [[N_CASTED3]], align 4 -// CHECK12-NEXT: [[TMP35:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK12-NEXT: [[TMP36:%.*]] = sext i32 [[TMP35]] to i64 -// CHECK12-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i32* -// CHECK12-NEXT: store i32 [[TMP34]], i32* [[TMP38]], align 4 -// CHECK12-NEXT: [[TMP39:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i32* -// CHECK12-NEXT: store i32 [[TMP34]], i32* [[TMP40]], align 4 -// CHECK12-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 0 -// CHECK12-NEXT: store i64 4, i64* [[TMP41]], align 4 -// CHECK12-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP29:%.*]] = load i32, i32* [[N]], align 4 +// CHECK12-NEXT: store i32 [[TMP29]], i32* [[N_CASTED3]], align 4 +// CHECK12-NEXT: [[TMP30:%.*]] = load i32, i32* [[N_CASTED3]], align 4 +// CHECK12-NEXT: [[TMP31:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK12-NEXT: [[TMP32:%.*]] = sext i32 [[TMP31]] to i64 +// CHECK12-NEXT: [[TMP33:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i32* +// CHECK12-NEXT: store i32 [[TMP30]], i32* [[TMP34]], align 4 +// CHECK12-NEXT: [[TMP35:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP36:%.*]] = bitcast i8** [[TMP35]] to i32* +// CHECK12-NEXT: store i32 [[TMP30]], i32* [[TMP36]], align 4 +// CHECK12-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0 +// CHECK12-NEXT: store i8* null, i8** [[TMP37]], align 4 +// CHECK12-NEXT: [[TMP38:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1 +// CHECK12-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i32* +// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP39]], align 4 +// CHECK12-NEXT: [[TMP40:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 1 +// CHECK12-NEXT: [[TMP41:%.*]] = bitcast i8** [[TMP40]] to i32* +// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP41]], align 4 
+// CHECK12-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1 // CHECK12-NEXT: store i8* null, i8** [[TMP42]], align 4 -// CHECK12-NEXT: [[TMP43:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1 -// CHECK12-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32* -// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP44]], align 4 -// CHECK12-NEXT: [[TMP45:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 1 -// CHECK12-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32* -// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP46]], align 4 -// CHECK12-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 1 -// CHECK12-NEXT: store i64 4, i64* [[TMP47]], align 4 -// CHECK12-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1 -// CHECK12-NEXT: store i8* null, i8** [[TMP48]], align 4 -// CHECK12-NEXT: [[TMP49:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2 -// CHECK12-NEXT: [[TMP50:%.*]] = bitcast i8** [[TMP49]] to i32** -// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP50]], align 4 -// CHECK12-NEXT: [[TMP51:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 2 -// CHECK12-NEXT: [[TMP52:%.*]] = bitcast i8** [[TMP51]] to i32** -// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP52]], align 4 -// CHECK12-NEXT: [[TMP53:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 2 -// CHECK12-NEXT: store i64 [[TMP36]], i64* [[TMP53]], align 4 -// CHECK12-NEXT: [[TMP54:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 2 -// CHECK12-NEXT: store i8* null, i8** [[TMP54]], align 4 -// CHECK12-NEXT: [[TMP55:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP56:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP57:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP58:%.*]] = load i32, i32* [[N]], align 4 -// CHECK12-NEXT: store i32 [[TMP58]], i32* [[DOTCAPTURE_EXPR_9]], align 4 -// CHECK12-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 -// CHECK12-NEXT: [[SUB11:%.*]] = sub nsw i32 [[TMP59]], 0 -// CHECK12-NEXT: [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1 -// CHECK12-NEXT: [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1 -// CHECK12-NEXT: store i32 [[SUB13]], i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK12-NEXT: [[TMP60:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK12-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP60]], 1 -// CHECK12-NEXT: [[TMP61:%.*]] = zext i32 [[ADD14]] to i64 -// CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP61]]) -// CHECK12-NEXT: [[TMP62:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l98.region_id, i32 3, i8** [[TMP55]], i8** [[TMP56]], i64* [[TMP57]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.2, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK12-NEXT: [[TMP63:%.*]] = icmp ne i32 [[TMP62]], 0 -// CHECK12-NEXT: br i1 [[TMP63]], label [[OMP_OFFLOAD_FAILED15:%.*]], label [[OMP_OFFLOAD_CONT16:%.*]] -// CHECK12: omp_offload.failed15: -// CHECK12-NEXT: call 
void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l98(i32 [[TMP34]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] -// CHECK12-NEXT: br label [[OMP_OFFLOAD_CONT16]] -// CHECK12: omp_offload.cont16: -// CHECK12-NEXT: [[TMP64:%.*]] = load i32, i32* [[N]], align 4 -// CHECK12-NEXT: store i32 [[TMP64]], i32* [[DOTCAPTURE_EXPR_17]], align 4 -// CHECK12-NEXT: [[TMP65:%.*]] = load i32, i32* [[N]], align 4 -// CHECK12-NEXT: store i32 [[TMP65]], i32* [[N_CASTED18]], align 4 -// CHECK12-NEXT: [[TMP66:%.*]] = load i32, i32* [[N_CASTED18]], align 4 -// CHECK12-NEXT: [[TMP67:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_17]], align 4 -// CHECK12-NEXT: store i32 [[TMP67]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK12-NEXT: [[TMP68:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK12-NEXT: [[TMP69:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK12-NEXT: [[TMP70:%.*]] = sext i32 [[TMP69]] to i64 -// CHECK12-NEXT: [[TMP71:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP72:%.*]] = bitcast i8** [[TMP71]] to i32* -// CHECK12-NEXT: store i32 [[TMP66]], i32* [[TMP72]], align 4 -// CHECK12-NEXT: [[TMP73:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP74:%.*]] = bitcast i8** [[TMP73]] to i32* -// CHECK12-NEXT: store i32 [[TMP66]], i32* [[TMP74]], align 4 -// CHECK12-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 0 -// CHECK12-NEXT: store i64 4, i64* [[TMP75]], align 4 -// CHECK12-NEXT: [[TMP76:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 0 -// CHECK12-NEXT: store i8* null, i8** [[TMP76]], align 4 -// CHECK12-NEXT: [[TMP77:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 1 -// CHECK12-NEXT: [[TMP78:%.*]] = bitcast i8** [[TMP77]] to i32* -// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP78]], align 4 -// CHECK12-NEXT: [[TMP79:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 1 -// CHECK12-NEXT: [[TMP80:%.*]] = bitcast i8** [[TMP79]] to i32* -// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP80]], align 4 -// CHECK12-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 1 -// CHECK12-NEXT: store i64 4, i64* [[TMP81]], align 4 -// CHECK12-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 1 +// CHECK12-NEXT: [[TMP43:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2 +// CHECK12-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32** +// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP44]], align 4 +// CHECK12-NEXT: [[TMP45:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 2 +// CHECK12-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32** +// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP46]], align 4 +// CHECK12-NEXT: store i64 [[TMP32]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.2, i32 0, i32 2), align 4 +// CHECK12-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 2 +// CHECK12-NEXT: store i8* null, i8** [[TMP47]], align 4 +// CHECK12-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP49:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 +// 
CHECK12-NEXT: [[TMP50:%.*]] = load i32, i32* [[N]], align 4 +// CHECK12-NEXT: store i32 [[TMP50]], i32* [[DOTCAPTURE_EXPR_8]], align 4 +// CHECK12-NEXT: [[TMP51:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_8]], align 4 +// CHECK12-NEXT: [[SUB10:%.*]] = sub nsw i32 [[TMP51]], 0 +// CHECK12-NEXT: [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1 +// CHECK12-NEXT: [[SUB12:%.*]] = sub nsw i32 [[DIV11]], 1 +// CHECK12-NEXT: store i32 [[SUB12]], i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK12-NEXT: [[TMP52:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK12-NEXT: [[ADD13:%.*]] = add nsw i32 [[TMP52]], 1 +// CHECK12-NEXT: [[TMP53:%.*]] = zext i32 [[ADD13]] to i64 +// CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP53]]) +// CHECK12-NEXT: [[TMP54:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l98.region_id, i32 3, i8** [[TMP48]], i8** [[TMP49]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.2, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.3, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK12-NEXT: [[TMP55:%.*]] = icmp ne i32 [[TMP54]], 0 +// CHECK12-NEXT: br i1 [[TMP55]], label [[OMP_OFFLOAD_FAILED14:%.*]], label [[OMP_OFFLOAD_CONT15:%.*]] +// CHECK12: omp_offload.failed14: +// CHECK12-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l98(i32 [[TMP30]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] +// CHECK12-NEXT: br label [[OMP_OFFLOAD_CONT15]] +// CHECK12: omp_offload.cont15: +// CHECK12-NEXT: [[TMP56:%.*]] = load i32, i32* [[N]], align 4 +// CHECK12-NEXT: store i32 [[TMP56]], i32* [[DOTCAPTURE_EXPR_16]], align 4 +// CHECK12-NEXT: [[TMP57:%.*]] = load i32, i32* [[N]], align 4 +// CHECK12-NEXT: store i32 [[TMP57]], i32* [[N_CASTED17]], align 4 +// CHECK12-NEXT: [[TMP58:%.*]] = load i32, i32* [[N_CASTED17]], align 4 +// CHECK12-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_16]], align 4 +// CHECK12-NEXT: store i32 [[TMP59]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 +// CHECK12-NEXT: [[TMP60:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 +// CHECK12-NEXT: [[TMP61:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK12-NEXT: [[TMP62:%.*]] = sext i32 [[TMP61]] to i64 +// CHECK12-NEXT: [[TMP63:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP64:%.*]] = bitcast i8** [[TMP63]] to i32* +// CHECK12-NEXT: store i32 [[TMP58]], i32* [[TMP64]], align 4 +// CHECK12-NEXT: [[TMP65:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP66:%.*]] = bitcast i8** [[TMP65]] to i32* +// CHECK12-NEXT: store i32 [[TMP58]], i32* [[TMP66]], align 4 +// CHECK12-NEXT: [[TMP67:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 0 +// CHECK12-NEXT: store i8* null, i8** [[TMP67]], align 4 +// CHECK12-NEXT: [[TMP68:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 1 +// CHECK12-NEXT: [[TMP69:%.*]] = bitcast i8** [[TMP68]] to i32* +// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP69]], align 4 +// CHECK12-NEXT: [[TMP70:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 1 +// CHECK12-NEXT: [[TMP71:%.*]] = bitcast i8** [[TMP70]] to i32* +// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP71]], align 4 +// CHECK12-NEXT: [[TMP72:%.*]] = getelementptr inbounds [4 x i8*], [4 x 
i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 1 +// CHECK12-NEXT: store i8* null, i8** [[TMP72]], align 4 +// CHECK12-NEXT: [[TMP73:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 2 +// CHECK12-NEXT: [[TMP74:%.*]] = bitcast i8** [[TMP73]] to i32** +// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP74]], align 4 +// CHECK12-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 2 +// CHECK12-NEXT: [[TMP76:%.*]] = bitcast i8** [[TMP75]] to i32** +// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP76]], align 4 +// CHECK12-NEXT: store i64 [[TMP62]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.5, i32 0, i32 2), align 4 +// CHECK12-NEXT: [[TMP77:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 2 +// CHECK12-NEXT: store i8* null, i8** [[TMP77]], align 4 +// CHECK12-NEXT: [[TMP78:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 3 +// CHECK12-NEXT: [[TMP79:%.*]] = bitcast i8** [[TMP78]] to i32* +// CHECK12-NEXT: store i32 [[TMP60]], i32* [[TMP79]], align 4 +// CHECK12-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 3 +// CHECK12-NEXT: [[TMP81:%.*]] = bitcast i8** [[TMP80]] to i32* +// CHECK12-NEXT: store i32 [[TMP60]], i32* [[TMP81]], align 4 +// CHECK12-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 3 // CHECK12-NEXT: store i8* null, i8** [[TMP82]], align 4 -// CHECK12-NEXT: [[TMP83:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 2 -// CHECK12-NEXT: [[TMP84:%.*]] = bitcast i8** [[TMP83]] to i32** -// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP84]], align 4 -// CHECK12-NEXT: [[TMP85:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 2 -// CHECK12-NEXT: [[TMP86:%.*]] = bitcast i8** [[TMP85]] to i32** -// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP86]], align 4 -// CHECK12-NEXT: [[TMP87:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 2 -// CHECK12-NEXT: store i64 [[TMP70]], i64* [[TMP87]], align 4 -// CHECK12-NEXT: [[TMP88:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 2 -// CHECK12-NEXT: store i8* null, i8** [[TMP88]], align 4 -// CHECK12-NEXT: [[TMP89:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 3 -// CHECK12-NEXT: [[TMP90:%.*]] = bitcast i8** [[TMP89]] to i32* -// CHECK12-NEXT: store i32 [[TMP68]], i32* [[TMP90]], align 4 -// CHECK12-NEXT: [[TMP91:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 3 -// CHECK12-NEXT: [[TMP92:%.*]] = bitcast i8** [[TMP91]] to i32* -// CHECK12-NEXT: store i32 [[TMP68]], i32* [[TMP92]], align 4 -// CHECK12-NEXT: [[TMP93:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 3 -// CHECK12-NEXT: store i64 4, i64* [[TMP93]], align 4 -// CHECK12-NEXT: [[TMP94:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 3 -// CHECK12-NEXT: store i8* null, i8** [[TMP94]], align 4 -// CHECK12-NEXT: [[TMP95:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP96:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP97:%.*]] = getelementptr inbounds [4 x i64], 
[4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP98:%.*]] = load i32, i32* [[N]], align 4 -// CHECK12-NEXT: store i32 [[TMP98]], i32* [[DOTCAPTURE_EXPR_24]], align 4 -// CHECK12-NEXT: [[TMP99:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_24]], align 4 -// CHECK12-NEXT: [[SUB26:%.*]] = sub nsw i32 [[TMP99]], 0 -// CHECK12-NEXT: [[DIV27:%.*]] = sdiv i32 [[SUB26]], 1 -// CHECK12-NEXT: [[SUB28:%.*]] = sub nsw i32 [[DIV27]], 1 -// CHECK12-NEXT: store i32 [[SUB28]], i32* [[DOTCAPTURE_EXPR_25]], align 4 -// CHECK12-NEXT: [[TMP100:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_25]], align 4 -// CHECK12-NEXT: [[ADD29:%.*]] = add nsw i32 [[TMP100]], 1 -// CHECK12-NEXT: [[TMP101:%.*]] = zext i32 [[ADD29]] to i64 -// CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP101]]) -// CHECK12-NEXT: [[TMP102:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.region_id, i32 4, i8** [[TMP95]], i8** [[TMP96]], i64* [[TMP97]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK12-NEXT: [[TMP103:%.*]] = icmp ne i32 [[TMP102]], 0 -// CHECK12-NEXT: br i1 [[TMP103]], label [[OMP_OFFLOAD_FAILED30:%.*]], label [[OMP_OFFLOAD_CONT31:%.*]] -// CHECK12: omp_offload.failed30: -// CHECK12-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102(i32 [[TMP66]], i32 [[TMP0]], i32* [[VLA]], i32 [[TMP68]]) #[[ATTR3]] -// CHECK12-NEXT: br label [[OMP_OFFLOAD_CONT31]] -// CHECK12: omp_offload.cont31: -// CHECK12-NEXT: [[TMP104:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 -// CHECK12-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP104]]) +// CHECK12-NEXT: [[TMP83:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP84:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP85:%.*]] = load i32, i32* [[N]], align 4 +// CHECK12-NEXT: store i32 [[TMP85]], i32* [[DOTCAPTURE_EXPR_22]], align 4 +// CHECK12-NEXT: [[TMP86:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_22]], align 4 +// CHECK12-NEXT: [[SUB24:%.*]] = sub nsw i32 [[TMP86]], 0 +// CHECK12-NEXT: [[DIV25:%.*]] = sdiv i32 [[SUB24]], 1 +// CHECK12-NEXT: [[SUB26:%.*]] = sub nsw i32 [[DIV25]], 1 +// CHECK12-NEXT: store i32 [[SUB26]], i32* [[DOTCAPTURE_EXPR_23]], align 4 +// CHECK12-NEXT: [[TMP87:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_23]], align 4 +// CHECK12-NEXT: [[ADD27:%.*]] = add nsw i32 [[TMP87]], 1 +// CHECK12-NEXT: [[TMP88:%.*]] = zext i32 [[ADD27]] to i64 +// CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP88]]) +// CHECK12-NEXT: [[TMP89:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.region_id, i32 4, i8** [[TMP83]], i8** [[TMP84]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.5, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.6, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK12-NEXT: [[TMP90:%.*]] = icmp ne i32 [[TMP89]], 0 +// CHECK12-NEXT: br i1 [[TMP90]], label [[OMP_OFFLOAD_FAILED28:%.*]], label [[OMP_OFFLOAD_CONT29:%.*]] +// CHECK12: omp_offload.failed28: +// CHECK12-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102(i32 [[TMP58]], i32 [[TMP0]], i32* 
[[VLA]], i32 [[TMP60]]) #[[ATTR3]] +// CHECK12-NEXT: br label [[OMP_OFFLOAD_CONT29]] +// CHECK12: omp_offload.cont29: +// CHECK12-NEXT: [[TMP91:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 +// CHECK12-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP91]]) // CHECK12-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK12-NEXT: [[TMP105:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK12-NEXT: call void @llvm.stackrestore(i8* [[TMP105]]) -// CHECK12-NEXT: [[TMP106:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK12-NEXT: ret i32 [[TMP106]] +// CHECK12-NEXT: [[TMP92:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK12-NEXT: call void @llvm.stackrestore(i8* [[TMP92]]) +// CHECK12-NEXT: [[TMP93:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK12-NEXT: ret i32 [[TMP93]] // // // CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l94 @@ -4800,11 +4708,11 @@ // CHECK12-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK12-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK12-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP5]]) +// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP5]]) // CHECK12-NEXT: ret void // // -// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..3 +// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..4 // CHECK12-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK12-NEXT: entry: // CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -4946,7 +4854,7 @@ // CHECK12-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK12-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK12-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l76.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.6, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK12-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l76.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK12-NEXT: 
[[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK12-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK12: omp_offload.failed: @@ -4964,7 +4872,7 @@ // CHECK12-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0 // CHECK12-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0 // CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK12-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l80.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK12-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l80.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.11, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK12-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 // CHECK12-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]] // CHECK12: omp_offload.failed5: @@ -4982,7 +4890,7 @@ // CHECK12-NEXT: [[TMP23:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0 // CHECK12-NEXT: [[TMP24:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0 // CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK12-NEXT: [[TMP25:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l84.region_id, i32 1, i8** [[TMP23]], i8** [[TMP24]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.11, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK12-NEXT: [[TMP25:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l84.region_id, i32 1, i8** [[TMP23]], i8** [[TMP24]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.14, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.15, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK12-NEXT: [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0 // CHECK12-NEXT: br i1 [[TMP26]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]] // CHECK12: omp_offload.failed11: @@ -4998,11 +4906,11 @@ // CHECK12-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK12-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 // CHECK12-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 -// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..5 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..7 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK12-NEXT: ret void // // -// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..5 +// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..7 // CHECK12-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK12-NEXT: entry: // CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -5074,11 +4982,11 @@ // CHECK12-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK12-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 // CHECK12-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 -// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..7 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK12-NEXT: ret void // // -// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..7 +// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..10 // CHECK12-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK12-NEXT: entry: // CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -5150,11 +5058,11 @@ // CHECK12-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK12-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 // CHECK12-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 -// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..13 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK12-NEXT: ret void // // -// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..10 +// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..13 // CHECK12-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK12-NEXT: entry: // CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 diff --git a/clang/test/OpenMP/target_teams_distribute_parallel_for_collapse_codegen.cpp b/clang/test/OpenMP/target_teams_distribute_parallel_for_collapse_codegen.cpp --- a/clang/test/OpenMP/target_teams_distribute_parallel_for_collapse_codegen.cpp +++ b/clang/test/OpenMP/target_teams_distribute_parallel_for_collapse_codegen.cpp @@ -979,7 +979,6 @@ // CHECK9-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 8 // CHECK9-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 8 // CHECK9-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 8 -// CHECK9-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 8 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK9-NEXT: [[_TMP2:%.*]] = alloca i32, align 4 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 @@ -1016,74 +1015,64 @@ // CHECK9-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK9-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64* // CHECK9-NEXT: store i64 [[TMP7]], i64* [[TMP15]], align 8 -// CHECK9-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK9-NEXT: store i64 4, i64* [[TMP16]], align 8 -// CHECK9-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK9-NEXT: store i8* null, i8** [[TMP17]], align 8 -// CHECK9-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK9-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i64* -// CHECK9-NEXT: store i64 [[TMP9]], i64* [[TMP19]], align 8 -// CHECK9-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK9-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i64* -// CHECK9-NEXT: store i64 [[TMP9]], i64* [[TMP21]], align 8 -// CHECK9-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK9-NEXT: store i64 4, i64* [[TMP22]], align 8 -// CHECK9-NEXT: [[TMP23:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK9-NEXT: store i8* null, i8** [[TMP23]], align 8 -// CHECK9-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK9-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK9-NEXT: store i8* null, i8** [[TMP16]], align 8 +// CHECK9-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK9-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i64* +// CHECK9-NEXT: store i64 [[TMP9]], i64* [[TMP18]], align 8 +// CHECK9-NEXT: [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK9-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i64* +// CHECK9-NEXT: store i64 
[[TMP9]], i64* [[TMP20]], align 8 +// CHECK9-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK9-NEXT: store i8* null, i8** [[TMP21]], align 8 +// CHECK9-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK9-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i64* +// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP23]], align 8 +// CHECK9-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK9-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i64* // CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP25]], align 8 -// CHECK9-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK9-NEXT: [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i64* -// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP27]], align 8 -// CHECK9-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK9-NEXT: store i64 8, i64* [[TMP28]], align 8 -// CHECK9-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK9-NEXT: store i8* null, i8** [[TMP29]], align 8 -// CHECK9-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK9-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i64* -// CHECK9-NEXT: store i64 [[TMP3]], i64* [[TMP31]], align 8 -// CHECK9-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK9-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i64* -// CHECK9-NEXT: store i64 [[TMP3]], i64* [[TMP33]], align 8 -// CHECK9-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK9-NEXT: store i64 8, i64* [[TMP34]], align 8 -// CHECK9-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 -// CHECK9-NEXT: store i8* null, i8** [[TMP35]], align 8 -// CHECK9-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK9-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i32** -// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP37]], align 8 -// CHECK9-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK9-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i32** -// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP39]], align 8 -// CHECK9-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK9-NEXT: store i64 [[TMP11]], i64* [[TMP40]], align 8 -// CHECK9-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 -// CHECK9-NEXT: store i8* null, i8** [[TMP41]], align 8 -// CHECK9-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP43:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP44:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP45:%.*]] = load i32, i32* [[N]], align 4 -// CHECK9-NEXT: store i32 [[TMP45]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK9-NEXT: [[TMP46:%.*]] = load i32, i32* [[M]], align 4 -// CHECK9-NEXT: store i32 [[TMP46]], i32* [[DOTCAPTURE_EXPR_3]], align 4 -// CHECK9-NEXT: [[TMP47:%.*]] = 
load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP47]], 0 +// CHECK9-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK9-NEXT: store i8* null, i8** [[TMP26]], align 8 +// CHECK9-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK9-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i64* +// CHECK9-NEXT: store i64 [[TMP3]], i64* [[TMP28]], align 8 +// CHECK9-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK9-NEXT: [[TMP30:%.*]] = bitcast i8** [[TMP29]] to i64* +// CHECK9-NEXT: store i64 [[TMP3]], i64* [[TMP30]], align 8 +// CHECK9-NEXT: [[TMP31:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 +// CHECK9-NEXT: store i8* null, i8** [[TMP31]], align 8 +// CHECK9-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK9-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i32** +// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP33]], align 8 +// CHECK9-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK9-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i32** +// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP35]], align 8 +// CHECK9-NEXT: store i64 [[TMP11]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes, i32 0, i32 4), align 8 +// CHECK9-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 +// CHECK9-NEXT: store i8* null, i8** [[TMP36]], align 8 +// CHECK9-NEXT: [[TMP37:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP39:%.*]] = load i32, i32* [[N]], align 4 +// CHECK9-NEXT: store i32 [[TMP39]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK9-NEXT: [[TMP40:%.*]] = load i32, i32* [[M]], align 4 +// CHECK9-NEXT: store i32 [[TMP40]], i32* [[DOTCAPTURE_EXPR_3]], align 4 +// CHECK9-NEXT: [[TMP41:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP41]], 0 // CHECK9-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK9-NEXT: [[CONV5:%.*]] = sext i32 [[DIV]] to i64 -// CHECK9-NEXT: [[TMP48:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4 -// CHECK9-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP48]], 0 +// CHECK9-NEXT: [[TMP42:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4 +// CHECK9-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP42]], 0 // CHECK9-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1 // CHECK9-NEXT: [[CONV8:%.*]] = sext i32 [[DIV7]] to i64 // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i64 [[CONV5]], [[CONV8]] // CHECK9-NEXT: [[SUB9:%.*]] = sub nsw i64 [[MUL]], 1 // CHECK9-NEXT: store i64 [[SUB9]], i64* [[DOTCAPTURE_EXPR_4]], align 8 -// CHECK9-NEXT: [[TMP49:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_4]], align 8 -// CHECK9-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP49]], 1 +// CHECK9-NEXT: [[TMP43:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_4]], align 8 +// CHECK9-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP43]], 1 // CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[ADD]]) -// CHECK9-NEXT: [[TMP50:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* 
@.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l81.region_id, i32 5, i8** [[TMP42]], i8** [[TMP43]], i64* [[TMP44]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK9-NEXT: [[TMP51:%.*]] = icmp ne i32 [[TMP50]], 0 -// CHECK9-NEXT: br i1 [[TMP51]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK9-NEXT: [[TMP44:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l81.region_id, i32 5, i8** [[TMP37]], i8** [[TMP38]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK9-NEXT: [[TMP45:%.*]] = icmp ne i32 [[TMP44]], 0 +// CHECK9-NEXT: br i1 [[TMP45]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK9: omp_offload.failed: // CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l81(i64 [[TMP7]], i64 [[TMP9]], i64 [[TMP1]], i64 [[TMP3]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -1091,10 +1080,10 @@ // CHECK9-NEXT: [[TMP52:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 // CHECK9-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10ELi2EEiT_(i32 noundef signext [[TMP52]]) // CHECK9-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK9-NEXT: [[TMP53:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK9-NEXT: call void @llvm.stackrestore(i8* [[TMP53]]) -// CHECK9-NEXT: [[TMP54:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK9-NEXT: ret i32 [[TMP54]] +// CHECK9-NEXT: [[TMP47:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK9-NEXT: call void @llvm.stackrestore(i8* [[TMP47]]) +// CHECK9-NEXT: [[TMP48:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK9-NEXT: ret i32 [[TMP48]] // // // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l81 @@ -1429,7 +1418,7 @@ // CHECK9-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK9-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 20) -// CHECK9-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l68.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK9-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l68.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK9-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK9-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK9: omp_offload.failed: @@ -1628,7 +1617,6 @@ // CHECK10-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 8 // 
CHECK10-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 8 // CHECK10-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 8 -// CHECK10-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 8 // CHECK10-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK10-NEXT: [[_TMP2:%.*]] = alloca i32, align 4 // CHECK10-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 @@ -1665,74 +1653,64 @@ // CHECK10-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK10-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64* // CHECK10-NEXT: store i64 [[TMP7]], i64* [[TMP15]], align 8 -// CHECK10-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK10-NEXT: store i64 4, i64* [[TMP16]], align 8 -// CHECK10-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK10-NEXT: store i8* null, i8** [[TMP17]], align 8 -// CHECK10-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK10-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i64* -// CHECK10-NEXT: store i64 [[TMP9]], i64* [[TMP19]], align 8 -// CHECK10-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK10-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i64* -// CHECK10-NEXT: store i64 [[TMP9]], i64* [[TMP21]], align 8 -// CHECK10-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK10-NEXT: store i64 4, i64* [[TMP22]], align 8 -// CHECK10-NEXT: [[TMP23:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK10-NEXT: store i8* null, i8** [[TMP23]], align 8 -// CHECK10-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK10-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK10-NEXT: store i8* null, i8** [[TMP16]], align 8 +// CHECK10-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK10-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i64* +// CHECK10-NEXT: store i64 [[TMP9]], i64* [[TMP18]], align 8 +// CHECK10-NEXT: [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK10-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i64* +// CHECK10-NEXT: store i64 [[TMP9]], i64* [[TMP20]], align 8 +// CHECK10-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK10-NEXT: store i8* null, i8** [[TMP21]], align 8 +// CHECK10-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK10-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i64* +// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP23]], align 8 +// CHECK10-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK10-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i64* // CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP25]], align 8 -// CHECK10-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK10-NEXT: [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i64* -// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP27]], align 8 -// CHECK10-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i64], [5 x 
i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK10-NEXT: store i64 8, i64* [[TMP28]], align 8 -// CHECK10-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK10-NEXT: store i8* null, i8** [[TMP29]], align 8 -// CHECK10-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK10-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i64* -// CHECK10-NEXT: store i64 [[TMP3]], i64* [[TMP31]], align 8 -// CHECK10-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK10-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i64* -// CHECK10-NEXT: store i64 [[TMP3]], i64* [[TMP33]], align 8 -// CHECK10-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK10-NEXT: store i64 8, i64* [[TMP34]], align 8 -// CHECK10-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 -// CHECK10-NEXT: store i8* null, i8** [[TMP35]], align 8 -// CHECK10-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK10-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i32** -// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP37]], align 8 -// CHECK10-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK10-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i32** -// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP39]], align 8 -// CHECK10-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK10-NEXT: store i64 [[TMP11]], i64* [[TMP40]], align 8 -// CHECK10-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 -// CHECK10-NEXT: store i8* null, i8** [[TMP41]], align 8 -// CHECK10-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP43:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP44:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP45:%.*]] = load i32, i32* [[N]], align 4 -// CHECK10-NEXT: store i32 [[TMP45]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK10-NEXT: [[TMP46:%.*]] = load i32, i32* [[M]], align 4 -// CHECK10-NEXT: store i32 [[TMP46]], i32* [[DOTCAPTURE_EXPR_3]], align 4 -// CHECK10-NEXT: [[TMP47:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK10-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP47]], 0 +// CHECK10-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK10-NEXT: store i8* null, i8** [[TMP26]], align 8 +// CHECK10-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK10-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i64* +// CHECK10-NEXT: store i64 [[TMP3]], i64* [[TMP28]], align 8 +// CHECK10-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK10-NEXT: [[TMP30:%.*]] = bitcast i8** [[TMP29]] to i64* +// CHECK10-NEXT: store i64 [[TMP3]], i64* [[TMP30]], align 8 +// CHECK10-NEXT: [[TMP31:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 +// CHECK10-NEXT: store i8* null, i8** [[TMP31]], align 8 +// CHECK10-NEXT: 
[[TMP32:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK10-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i32** +// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP33]], align 8 +// CHECK10-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK10-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i32** +// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP35]], align 8 +// CHECK10-NEXT: store i64 [[TMP11]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes, i32 0, i32 4), align 8 +// CHECK10-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 +// CHECK10-NEXT: store i8* null, i8** [[TMP36]], align 8 +// CHECK10-NEXT: [[TMP37:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP39:%.*]] = load i32, i32* [[N]], align 4 +// CHECK10-NEXT: store i32 [[TMP39]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK10-NEXT: [[TMP40:%.*]] = load i32, i32* [[M]], align 4 +// CHECK10-NEXT: store i32 [[TMP40]], i32* [[DOTCAPTURE_EXPR_3]], align 4 +// CHECK10-NEXT: [[TMP41:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK10-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP41]], 0 // CHECK10-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK10-NEXT: [[CONV5:%.*]] = sext i32 [[DIV]] to i64 -// CHECK10-NEXT: [[TMP48:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4 -// CHECK10-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP48]], 0 +// CHECK10-NEXT: [[TMP42:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4 +// CHECK10-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP42]], 0 // CHECK10-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1 // CHECK10-NEXT: [[CONV8:%.*]] = sext i32 [[DIV7]] to i64 // CHECK10-NEXT: [[MUL:%.*]] = mul nsw i64 [[CONV5]], [[CONV8]] // CHECK10-NEXT: [[SUB9:%.*]] = sub nsw i64 [[MUL]], 1 // CHECK10-NEXT: store i64 [[SUB9]], i64* [[DOTCAPTURE_EXPR_4]], align 8 -// CHECK10-NEXT: [[TMP49:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_4]], align 8 -// CHECK10-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP49]], 1 +// CHECK10-NEXT: [[TMP43:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_4]], align 8 +// CHECK10-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP43]], 1 // CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[ADD]]) -// CHECK10-NEXT: [[TMP50:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l81.region_id, i32 5, i8** [[TMP42]], i8** [[TMP43]], i64* [[TMP44]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK10-NEXT: [[TMP51:%.*]] = icmp ne i32 [[TMP50]], 0 -// CHECK10-NEXT: br i1 [[TMP51]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK10-NEXT: [[TMP44:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l81.region_id, i32 5, i8** [[TMP37]], i8** [[TMP38]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK10-NEXT: [[TMP45:%.*]] = icmp ne i32 [[TMP44]], 0 +// CHECK10-NEXT: br i1 [[TMP45]], label [[OMP_OFFLOAD_FAILED:%.*]], 
label [[OMP_OFFLOAD_CONT:%.*]] // CHECK10: omp_offload.failed: // CHECK10-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l81(i64 [[TMP7]], i64 [[TMP9]], i64 [[TMP1]], i64 [[TMP3]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK10-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -1740,10 +1718,10 @@ // CHECK10-NEXT: [[TMP52:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 // CHECK10-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10ELi2EEiT_(i32 noundef signext [[TMP52]]) // CHECK10-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK10-NEXT: [[TMP53:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK10-NEXT: call void @llvm.stackrestore(i8* [[TMP53]]) -// CHECK10-NEXT: [[TMP54:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK10-NEXT: ret i32 [[TMP54]] +// CHECK10-NEXT: [[TMP47:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK10-NEXT: call void @llvm.stackrestore(i8* [[TMP47]]) +// CHECK10-NEXT: [[TMP48:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK10-NEXT: ret i32 [[TMP48]] // // // CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l81 @@ -2078,7 +2056,7 @@ // CHECK10-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK10-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 20) -// CHECK10-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l68.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK10-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l68.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK10-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK10-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK10: omp_offload.failed: @@ -2277,7 +2255,6 @@ // CHECK11-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 4 // CHECK11-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 4 // CHECK11-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 4 -// CHECK11-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 4 // CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK11-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 @@ -2311,74 +2288,64 @@ // CHECK11-NEXT: [[TMP13:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK11-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i32* // CHECK11-NEXT: store i32 [[TMP5]], i32* [[TMP14]], align 4 -// CHECK11-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK11-NEXT: store i64 4, i64* [[TMP15]], align 4 -// CHECK11-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 
0 -// CHECK11-NEXT: store i8* null, i8** [[TMP16]], align 4 -// CHECK11-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK11-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32* -// CHECK11-NEXT: store i32 [[TMP7]], i32* [[TMP18]], align 4 -// CHECK11-NEXT: [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK11-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i32* -// CHECK11-NEXT: store i32 [[TMP7]], i32* [[TMP20]], align 4 -// CHECK11-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK11-NEXT: store i64 4, i64* [[TMP21]], align 4 -// CHECK11-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK11-NEXT: store i8* null, i8** [[TMP22]], align 4 -// CHECK11-NEXT: [[TMP23:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK11-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK11-NEXT: store i8* null, i8** [[TMP15]], align 4 +// CHECK11-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK11-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32* +// CHECK11-NEXT: store i32 [[TMP7]], i32* [[TMP17]], align 4 +// CHECK11-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK11-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32* +// CHECK11-NEXT: store i32 [[TMP7]], i32* [[TMP19]], align 4 +// CHECK11-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK11-NEXT: store i8* null, i8** [[TMP20]], align 4 +// CHECK11-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK11-NEXT: [[TMP22:%.*]] = bitcast i8** [[TMP21]] to i32* +// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP22]], align 4 +// CHECK11-NEXT: [[TMP23:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK11-NEXT: [[TMP24:%.*]] = bitcast i8** [[TMP23]] to i32* // CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP24]], align 4 -// CHECK11-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK11-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i32* -// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP26]], align 4 -// CHECK11-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK11-NEXT: store i64 4, i64* [[TMP27]], align 4 -// CHECK11-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK11-NEXT: store i8* null, i8** [[TMP28]], align 4 -// CHECK11-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK11-NEXT: [[TMP30:%.*]] = bitcast i8** [[TMP29]] to i32* -// CHECK11-NEXT: store i32 [[TMP1]], i32* [[TMP30]], align 4 -// CHECK11-NEXT: [[TMP31:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK11-NEXT: [[TMP32:%.*]] = bitcast i8** [[TMP31]] to i32* -// CHECK11-NEXT: store i32 [[TMP1]], i32* [[TMP32]], align 4 -// CHECK11-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK11-NEXT: store i64 4, i64* [[TMP33]], align 4 
-// CHECK11-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 -// CHECK11-NEXT: store i8* null, i8** [[TMP34]], align 4 -// CHECK11-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK11-NEXT: [[TMP36:%.*]] = bitcast i8** [[TMP35]] to i32** -// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP36]], align 4 -// CHECK11-NEXT: [[TMP37:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK11-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i32** -// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP38]], align 4 -// CHECK11-NEXT: [[TMP39:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK11-NEXT: store i64 [[TMP10]], i64* [[TMP39]], align 4 -// CHECK11-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 -// CHECK11-NEXT: store i8* null, i8** [[TMP40]], align 4 -// CHECK11-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP43:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP44:%.*]] = load i32, i32* [[N]], align 4 -// CHECK11-NEXT: store i32 [[TMP44]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK11-NEXT: [[TMP45:%.*]] = load i32, i32* [[M]], align 4 -// CHECK11-NEXT: store i32 [[TMP45]], i32* [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK11-NEXT: [[TMP46:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP46]], 0 +// CHECK11-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK11-NEXT: store i8* null, i8** [[TMP25]], align 4 +// CHECK11-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK11-NEXT: [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i32* +// CHECK11-NEXT: store i32 [[TMP1]], i32* [[TMP27]], align 4 +// CHECK11-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK11-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i32* +// CHECK11-NEXT: store i32 [[TMP1]], i32* [[TMP29]], align 4 +// CHECK11-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 +// CHECK11-NEXT: store i8* null, i8** [[TMP30]], align 4 +// CHECK11-NEXT: [[TMP31:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK11-NEXT: [[TMP32:%.*]] = bitcast i8** [[TMP31]] to i32** +// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP32]], align 4 +// CHECK11-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK11-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i32** +// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP34]], align 4 +// CHECK11-NEXT: store i64 [[TMP10]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes, i32 0, i32 4), align 4 +// CHECK11-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 +// CHECK11-NEXT: store i8* null, i8** [[TMP35]], align 4 +// CHECK11-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP37:%.*]] = 
getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP38:%.*]] = load i32, i32* [[N]], align 4 +// CHECK11-NEXT: store i32 [[TMP38]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK11-NEXT: [[TMP39:%.*]] = load i32, i32* [[M]], align 4 +// CHECK11-NEXT: store i32 [[TMP39]], i32* [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK11-NEXT: [[TMP40:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP40]], 0 // CHECK11-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK11-NEXT: [[CONV:%.*]] = sext i32 [[DIV]] to i64 -// CHECK11-NEXT: [[TMP47:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK11-NEXT: [[SUB4:%.*]] = sub nsw i32 [[TMP47]], 0 +// CHECK11-NEXT: [[TMP41:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK11-NEXT: [[SUB4:%.*]] = sub nsw i32 [[TMP41]], 0 // CHECK11-NEXT: [[DIV5:%.*]] = sdiv i32 [[SUB4]], 1 // CHECK11-NEXT: [[CONV6:%.*]] = sext i32 [[DIV5]] to i64 // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i64 [[CONV]], [[CONV6]] // CHECK11-NEXT: [[SUB7:%.*]] = sub nsw i64 [[MUL]], 1 // CHECK11-NEXT: store i64 [[SUB7]], i64* [[DOTCAPTURE_EXPR_3]], align 8 -// CHECK11-NEXT: [[TMP48:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8 -// CHECK11-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP48]], 1 +// CHECK11-NEXT: [[TMP42:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8 +// CHECK11-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP42]], 1 // CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[ADD]]) -// CHECK11-NEXT: [[TMP49:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l81.region_id, i32 5, i8** [[TMP41]], i8** [[TMP42]], i64* [[TMP43]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK11-NEXT: [[TMP50:%.*]] = icmp ne i32 [[TMP49]], 0 -// CHECK11-NEXT: br i1 [[TMP50]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK11-NEXT: [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l81.region_id, i32 5, i8** [[TMP36]], i8** [[TMP37]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK11-NEXT: [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0 +// CHECK11-NEXT: br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK11: omp_offload.failed: // CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l81(i32 [[TMP5]], i32 [[TMP7]], i32 [[TMP0]], i32 [[TMP1]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -2386,10 +2353,10 @@ // CHECK11-NEXT: [[TMP51:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 // CHECK11-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10ELi2EEiT_(i32 noundef [[TMP51]]) // CHECK11-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK11-NEXT: [[TMP52:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK11-NEXT: call void @llvm.stackrestore(i8* [[TMP52]]) -// CHECK11-NEXT: [[TMP53:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK11-NEXT: ret i32 [[TMP53]] +// CHECK11-NEXT: [[TMP46:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK11-NEXT: call void @llvm.stackrestore(i8* [[TMP46]]) +// CHECK11-NEXT: 
[[TMP47:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK11-NEXT: ret i32 [[TMP47]] // // // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l81 @@ -2716,7 +2683,7 @@ // CHECK11-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK11-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 20) -// CHECK11-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l68.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK11-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l68.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK11-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK11-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK11: omp_offload.failed: @@ -2909,7 +2876,6 @@ // CHECK12-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 4 // CHECK12-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 4 // CHECK12-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 4 -// CHECK12-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 4 // CHECK12-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK12-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 // CHECK12-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 @@ -2943,74 +2909,64 @@ // CHECK12-NEXT: [[TMP13:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK12-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i32* // CHECK12-NEXT: store i32 [[TMP5]], i32* [[TMP14]], align 4 -// CHECK12-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK12-NEXT: store i64 4, i64* [[TMP15]], align 4 -// CHECK12-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK12-NEXT: store i8* null, i8** [[TMP16]], align 4 -// CHECK12-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK12-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32* -// CHECK12-NEXT: store i32 [[TMP7]], i32* [[TMP18]], align 4 -// CHECK12-NEXT: [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK12-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i32* -// CHECK12-NEXT: store i32 [[TMP7]], i32* [[TMP20]], align 4 -// CHECK12-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK12-NEXT: store i64 4, i64* [[TMP21]], align 4 -// CHECK12-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK12-NEXT: store i8* null, i8** [[TMP22]], align 4 -// CHECK12-NEXT: [[TMP23:%.*]] = getelementptr 
inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK12-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK12-NEXT: store i8* null, i8** [[TMP15]], align 4 +// CHECK12-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK12-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32* +// CHECK12-NEXT: store i32 [[TMP7]], i32* [[TMP17]], align 4 +// CHECK12-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK12-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32* +// CHECK12-NEXT: store i32 [[TMP7]], i32* [[TMP19]], align 4 +// CHECK12-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK12-NEXT: store i8* null, i8** [[TMP20]], align 4 +// CHECK12-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK12-NEXT: [[TMP22:%.*]] = bitcast i8** [[TMP21]] to i32* +// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP22]], align 4 +// CHECK12-NEXT: [[TMP23:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK12-NEXT: [[TMP24:%.*]] = bitcast i8** [[TMP23]] to i32* // CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP24]], align 4 -// CHECK12-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK12-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i32* -// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP26]], align 4 -// CHECK12-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK12-NEXT: store i64 4, i64* [[TMP27]], align 4 -// CHECK12-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK12-NEXT: store i8* null, i8** [[TMP28]], align 4 -// CHECK12-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK12-NEXT: [[TMP30:%.*]] = bitcast i8** [[TMP29]] to i32* -// CHECK12-NEXT: store i32 [[TMP1]], i32* [[TMP30]], align 4 -// CHECK12-NEXT: [[TMP31:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK12-NEXT: [[TMP32:%.*]] = bitcast i8** [[TMP31]] to i32* -// CHECK12-NEXT: store i32 [[TMP1]], i32* [[TMP32]], align 4 -// CHECK12-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK12-NEXT: store i64 4, i64* [[TMP33]], align 4 -// CHECK12-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 -// CHECK12-NEXT: store i8* null, i8** [[TMP34]], align 4 -// CHECK12-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK12-NEXT: [[TMP36:%.*]] = bitcast i8** [[TMP35]] to i32** -// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP36]], align 4 -// CHECK12-NEXT: [[TMP37:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK12-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i32** -// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP38]], align 4 -// CHECK12-NEXT: [[TMP39:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK12-NEXT: store i64 [[TMP10]], i64* [[TMP39]], align 4 -// CHECK12-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* 
[[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 -// CHECK12-NEXT: store i8* null, i8** [[TMP40]], align 4 -// CHECK12-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP43:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP44:%.*]] = load i32, i32* [[N]], align 4 -// CHECK12-NEXT: store i32 [[TMP44]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK12-NEXT: [[TMP45:%.*]] = load i32, i32* [[M]], align 4 -// CHECK12-NEXT: store i32 [[TMP45]], i32* [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK12-NEXT: [[TMP46:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK12-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP46]], 0 +// CHECK12-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK12-NEXT: store i8* null, i8** [[TMP25]], align 4 +// CHECK12-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK12-NEXT: [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i32* +// CHECK12-NEXT: store i32 [[TMP1]], i32* [[TMP27]], align 4 +// CHECK12-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK12-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i32* +// CHECK12-NEXT: store i32 [[TMP1]], i32* [[TMP29]], align 4 +// CHECK12-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 +// CHECK12-NEXT: store i8* null, i8** [[TMP30]], align 4 +// CHECK12-NEXT: [[TMP31:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK12-NEXT: [[TMP32:%.*]] = bitcast i8** [[TMP31]] to i32** +// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP32]], align 4 +// CHECK12-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK12-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i32** +// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP34]], align 4 +// CHECK12-NEXT: store i64 [[TMP10]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes, i32 0, i32 4), align 4 +// CHECK12-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 +// CHECK12-NEXT: store i8* null, i8** [[TMP35]], align 4 +// CHECK12-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP37:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP38:%.*]] = load i32, i32* [[N]], align 4 +// CHECK12-NEXT: store i32 [[TMP38]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK12-NEXT: [[TMP39:%.*]] = load i32, i32* [[M]], align 4 +// CHECK12-NEXT: store i32 [[TMP39]], i32* [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK12-NEXT: [[TMP40:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK12-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP40]], 0 // CHECK12-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK12-NEXT: [[CONV:%.*]] = sext i32 [[DIV]] to i64 -// CHECK12-NEXT: [[TMP47:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK12-NEXT: [[SUB4:%.*]] = sub nsw i32 [[TMP47]], 0 +// CHECK12-NEXT: [[TMP41:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK12-NEXT: [[SUB4:%.*]] = sub nsw i32 [[TMP41]], 0 // CHECK12-NEXT: [[DIV5:%.*]] = sdiv i32 [[SUB4]], 1 
// CHECK12-NEXT: [[CONV6:%.*]] = sext i32 [[DIV5]] to i64 // CHECK12-NEXT: [[MUL:%.*]] = mul nsw i64 [[CONV]], [[CONV6]] // CHECK12-NEXT: [[SUB7:%.*]] = sub nsw i64 [[MUL]], 1 // CHECK12-NEXT: store i64 [[SUB7]], i64* [[DOTCAPTURE_EXPR_3]], align 8 -// CHECK12-NEXT: [[TMP48:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8 -// CHECK12-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP48]], 1 +// CHECK12-NEXT: [[TMP42:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8 +// CHECK12-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP42]], 1 // CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[ADD]]) -// CHECK12-NEXT: [[TMP49:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l81.region_id, i32 5, i8** [[TMP41]], i8** [[TMP42]], i64* [[TMP43]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK12-NEXT: [[TMP50:%.*]] = icmp ne i32 [[TMP49]], 0 -// CHECK12-NEXT: br i1 [[TMP50]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK12-NEXT: [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l81.region_id, i32 5, i8** [[TMP36]], i8** [[TMP37]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK12-NEXT: [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0 +// CHECK12-NEXT: br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK12: omp_offload.failed: // CHECK12-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l81(i32 [[TMP5]], i32 [[TMP7]], i32 [[TMP0]], i32 [[TMP1]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK12-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -3018,10 +2974,10 @@ // CHECK12-NEXT: [[TMP51:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 // CHECK12-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10ELi2EEiT_(i32 noundef [[TMP51]]) // CHECK12-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK12-NEXT: [[TMP52:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK12-NEXT: call void @llvm.stackrestore(i8* [[TMP52]]) -// CHECK12-NEXT: [[TMP53:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK12-NEXT: ret i32 [[TMP53]] +// CHECK12-NEXT: [[TMP46:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK12-NEXT: call void @llvm.stackrestore(i8* [[TMP46]]) +// CHECK12-NEXT: [[TMP47:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK12-NEXT: ret i32 [[TMP47]] // // // CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l81 @@ -3348,7 +3304,7 @@ // CHECK12-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK12-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 20) -// CHECK12-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l68.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.4, i32 0, 
i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK12-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l68.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK12-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK12-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK12: omp_offload.failed: diff --git a/clang/test/OpenMP/target_teams_distribute_parallel_for_dist_schedule_codegen.cpp b/clang/test/OpenMP/target_teams_distribute_parallel_for_dist_schedule_codegen.cpp --- a/clang/test/OpenMP/target_teams_distribute_parallel_for_dist_schedule_codegen.cpp +++ b/clang/test/OpenMP/target_teams_distribute_parallel_for_dist_schedule_codegen.cpp @@ -2394,7 +2394,6 @@ // CHECK9-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK9-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK9-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8 -// CHECK9-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 8 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -2402,20 +2401,18 @@ // CHECK9-NEXT: [[DOTOFFLOAD_BASEPTRS5:%.*]] = alloca [3 x i8*], align 8 // CHECK9-NEXT: [[DOTOFFLOAD_PTRS6:%.*]] = alloca [3 x i8*], align 8 // CHECK9-NEXT: [[DOTOFFLOAD_MAPPERS7:%.*]] = alloca [3 x i8*], align 8 -// CHECK9-NEXT: [[DOTOFFLOAD_SIZES8:%.*]] = alloca [3 x i64], align 8 -// CHECK9-NEXT: [[_TMP9:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[_TMP8:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTCAPTURE_EXPR_11:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTCAPTURE_EXPR_18:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[N_CASTED19:%.*]] = alloca i64, align 8 +// CHECK9-NEXT: [[DOTCAPTURE_EXPR_17:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[N_CASTED18:%.*]] = alloca i64, align 8 // CHECK9-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8 -// CHECK9-NEXT: [[DOTOFFLOAD_BASEPTRS22:%.*]] = alloca [4 x i8*], align 8 -// CHECK9-NEXT: [[DOTOFFLOAD_PTRS23:%.*]] = alloca [4 x i8*], align 8 -// CHECK9-NEXT: [[DOTOFFLOAD_MAPPERS24:%.*]] = alloca [4 x i8*], align 8 -// CHECK9-NEXT: [[DOTOFFLOAD_SIZES25:%.*]] = alloca [4 x i64], align 8 -// CHECK9-NEXT: [[_TMP26:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTCAPTURE_EXPR_27:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTCAPTURE_EXPR_28:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOFFLOAD_BASEPTRS21:%.*]] = alloca [4 x i8*], align 8 +// CHECK9-NEXT: [[DOTOFFLOAD_PTRS22:%.*]] = alloca [4 x i8*], align 8 +// CHECK9-NEXT: [[DOTOFFLOAD_MAPPERS23:%.*]] = alloca [4 x i8*], align 8 +// CHECK9-NEXT: [[_TMP24:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTCAPTURE_EXPR_25:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTCAPTURE_EXPR_26:%.*]] = alloca i32, align 4 // CHECK9-NEXT: store i32 0, i32* [[RETVAL]], align 4 // CHECK9-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 // CHECK9-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 8 @@ -2438,186 +2435,166 @@ // CHECK9-NEXT: [[TMP8:%.*]] = 
getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK9-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i64* // CHECK9-NEXT: store i64 [[TMP4]], i64* [[TMP9]], align 8 -// CHECK9-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK9-NEXT: store i64 4, i64* [[TMP10]], align 8 -// CHECK9-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK9-NEXT: store i8* null, i8** [[TMP11]], align 8 -// CHECK9-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK9-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64* -// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP13]], align 8 -// CHECK9-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK9-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64* -// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP15]], align 8 -// CHECK9-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK9-NEXT: store i64 8, i64* [[TMP16]], align 8 -// CHECK9-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK9-NEXT: store i8* null, i8** [[TMP17]], align 8 -// CHECK9-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK9-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK9-NEXT: store i8* null, i8** [[TMP10]], align 8 +// CHECK9-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK9-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i64* +// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP12]], align 8 +// CHECK9-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK9-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i64* +// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP14]], align 8 +// CHECK9-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK9-NEXT: store i8* null, i8** [[TMP15]], align 8 +// CHECK9-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK9-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** +// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 8 +// CHECK9-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK9-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** // CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 8 -// CHECK9-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK9-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** -// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 8 -// CHECK9-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK9-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 8 -// CHECK9-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK9-NEXT: store i8* null, i8** [[TMP23]], align 8 -// CHECK9-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* 
[[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4 -// CHECK9-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK9-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 +// CHECK9-NEXT: store i64 [[TMP5]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 2), align 8 +// CHECK9-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK9-NEXT: store i8* null, i8** [[TMP20]], align 8 +// CHECK9-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP23:%.*]] = load i32, i32* [[N]], align 4 +// CHECK9-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK9-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK9-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK9-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK9-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK9-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1 -// CHECK9-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 -// CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP30]]) -// CHECK9-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l100.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK9-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 -// CHECK9-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK9-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], 1 +// CHECK9-NEXT: [[TMP26:%.*]] = zext i32 [[ADD]] to i64 +// CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP26]]) +// CHECK9-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l100.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK9-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK9-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK9: omp_offload.failed: // CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l100(i64 [[TMP4]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK9: omp_offload.cont: -// CHECK9-NEXT: [[TMP33:%.*]] = load i32, i32* [[N]], align 4 +// CHECK9-NEXT: [[TMP29:%.*]] = load i32, i32* [[N]], align 4 // CHECK9-NEXT: [[CONV4:%.*]] = bitcast i64* [[N_CASTED3]] to i32* -// 
CHECK9-NEXT: store i32 [[TMP33]], i32* [[CONV4]], align 4 -// CHECK9-NEXT: [[TMP34:%.*]] = load i64, i64* [[N_CASTED3]], align 8 -// CHECK9-NEXT: [[TMP35:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK9-NEXT: [[TMP36:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i64* -// CHECK9-NEXT: store i64 [[TMP34]], i64* [[TMP37]], align 8 -// CHECK9-NEXT: [[TMP38:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i64* -// CHECK9-NEXT: store i64 [[TMP34]], i64* [[TMP39]], align 8 -// CHECK9-NEXT: [[TMP40:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 0 -// CHECK9-NEXT: store i64 4, i64* [[TMP40]], align 8 -// CHECK9-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 0 +// CHECK9-NEXT: store i32 [[TMP29]], i32* [[CONV4]], align 4 +// CHECK9-NEXT: [[TMP30:%.*]] = load i64, i64* [[N_CASTED3]], align 8 +// CHECK9-NEXT: [[TMP31:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK9-NEXT: [[TMP32:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i64* +// CHECK9-NEXT: store i64 [[TMP30]], i64* [[TMP33]], align 8 +// CHECK9-NEXT: [[TMP34:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i64* +// CHECK9-NEXT: store i64 [[TMP30]], i64* [[TMP35]], align 8 +// CHECK9-NEXT: [[TMP36:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 0 +// CHECK9-NEXT: store i8* null, i8** [[TMP36]], align 8 +// CHECK9-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1 +// CHECK9-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i64* +// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP38]], align 8 +// CHECK9-NEXT: [[TMP39:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 1 +// CHECK9-NEXT: [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i64* +// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP40]], align 8 +// CHECK9-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 1 // CHECK9-NEXT: store i8* null, i8** [[TMP41]], align 8 -// CHECK9-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1 -// CHECK9-NEXT: [[TMP43:%.*]] = bitcast i8** [[TMP42]] to i64* -// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP43]], align 8 -// CHECK9-NEXT: [[TMP44:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 1 -// CHECK9-NEXT: [[TMP45:%.*]] = bitcast i8** [[TMP44]] to i64* -// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP45]], align 8 -// CHECK9-NEXT: [[TMP46:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 1 -// CHECK9-NEXT: store i64 8, i64* [[TMP46]], align 8 -// CHECK9-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 1 -// CHECK9-NEXT: store i8* null, i8** [[TMP47]], align 8 -// CHECK9-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 2 -// CHECK9-NEXT: [[TMP49:%.*]] = bitcast i8** [[TMP48]] to i32** -// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP49]], align 8 -// CHECK9-NEXT: [[TMP50:%.*]] = 
getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 2 -// CHECK9-NEXT: [[TMP51:%.*]] = bitcast i8** [[TMP50]] to i32** -// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP51]], align 8 -// CHECK9-NEXT: [[TMP52:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 2 -// CHECK9-NEXT: store i64 [[TMP35]], i64* [[TMP52]], align 8 -// CHECK9-NEXT: [[TMP53:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 2 -// CHECK9-NEXT: store i8* null, i8** [[TMP53]], align 8 -// CHECK9-NEXT: [[TMP54:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP55:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP56:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP57:%.*]] = load i32, i32* [[N]], align 4 -// CHECK9-NEXT: store i32 [[TMP57]], i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK9-NEXT: [[TMP58:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK9-NEXT: [[SUB12:%.*]] = sub nsw i32 [[TMP58]], 0 -// CHECK9-NEXT: [[DIV13:%.*]] = sdiv i32 [[SUB12]], 1 -// CHECK9-NEXT: [[SUB14:%.*]] = sub nsw i32 [[DIV13]], 1 -// CHECK9-NEXT: store i32 [[SUB14]], i32* [[DOTCAPTURE_EXPR_11]], align 4 -// CHECK9-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_11]], align 4 -// CHECK9-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP59]], 1 -// CHECK9-NEXT: [[TMP60:%.*]] = zext i32 [[ADD15]] to i64 -// CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP60]]) -// CHECK9-NEXT: [[TMP61:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l104.region_id, i32 3, i8** [[TMP54]], i8** [[TMP55]], i64* [[TMP56]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK9-NEXT: [[TMP62:%.*]] = icmp ne i32 [[TMP61]], 0 -// CHECK9-NEXT: br i1 [[TMP62]], label [[OMP_OFFLOAD_FAILED16:%.*]], label [[OMP_OFFLOAD_CONT17:%.*]] -// CHECK9: omp_offload.failed16: -// CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l104(i64 [[TMP34]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] -// CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT17]] -// CHECK9: omp_offload.cont17: -// CHECK9-NEXT: [[TMP63:%.*]] = load i32, i32* [[M]], align 4 -// CHECK9-NEXT: store i32 [[TMP63]], i32* [[DOTCAPTURE_EXPR_18]], align 4 -// CHECK9-NEXT: [[TMP64:%.*]] = load i32, i32* [[N]], align 4 -// CHECK9-NEXT: [[CONV20:%.*]] = bitcast i64* [[N_CASTED19]] to i32* -// CHECK9-NEXT: store i32 [[TMP64]], i32* [[CONV20]], align 4 -// CHECK9-NEXT: [[TMP65:%.*]] = load i64, i64* [[N_CASTED19]], align 8 -// CHECK9-NEXT: [[TMP66:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_18]], align 4 -// CHECK9-NEXT: [[CONV21:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* -// CHECK9-NEXT: store i32 [[TMP66]], i32* [[CONV21]], align 4 -// CHECK9-NEXT: [[TMP67:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK9-NEXT: [[TMP68:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK9-NEXT: [[TMP69:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP70:%.*]] = bitcast i8** [[TMP69]] to i64* -// CHECK9-NEXT: store i64 [[TMP65]], i64* [[TMP70]], align 8 -// CHECK9-NEXT: [[TMP71:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* 
[[DOTOFFLOAD_PTRS23]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP72:%.*]] = bitcast i8** [[TMP71]] to i64* -// CHECK9-NEXT: store i64 [[TMP65]], i64* [[TMP72]], align 8 -// CHECK9-NEXT: [[TMP73:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 0 -// CHECK9-NEXT: store i64 4, i64* [[TMP73]], align 8 -// CHECK9-NEXT: [[TMP74:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 0 -// CHECK9-NEXT: store i8* null, i8** [[TMP74]], align 8 -// CHECK9-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 1 -// CHECK9-NEXT: [[TMP76:%.*]] = bitcast i8** [[TMP75]] to i64* -// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP76]], align 8 -// CHECK9-NEXT: [[TMP77:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 1 -// CHECK9-NEXT: [[TMP78:%.*]] = bitcast i8** [[TMP77]] to i64* -// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP78]], align 8 -// CHECK9-NEXT: [[TMP79:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 1 -// CHECK9-NEXT: store i64 8, i64* [[TMP79]], align 8 -// CHECK9-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 1 +// CHECK9-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 2 +// CHECK9-NEXT: [[TMP43:%.*]] = bitcast i8** [[TMP42]] to i32** +// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP43]], align 8 +// CHECK9-NEXT: [[TMP44:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 2 +// CHECK9-NEXT: [[TMP45:%.*]] = bitcast i8** [[TMP44]] to i32** +// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP45]], align 8 +// CHECK9-NEXT: store i64 [[TMP31]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 2), align 8 +// CHECK9-NEXT: [[TMP46:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 2 +// CHECK9-NEXT: store i8* null, i8** [[TMP46]], align 8 +// CHECK9-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP49:%.*]] = load i32, i32* [[N]], align 4 +// CHECK9-NEXT: store i32 [[TMP49]], i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK9-NEXT: [[TMP50:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK9-NEXT: [[SUB11:%.*]] = sub nsw i32 [[TMP50]], 0 +// CHECK9-NEXT: [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1 +// CHECK9-NEXT: [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1 +// CHECK9-NEXT: store i32 [[SUB13]], i32* [[DOTCAPTURE_EXPR_10]], align 4 +// CHECK9-NEXT: [[TMP51:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 +// CHECK9-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP51]], 1 +// CHECK9-NEXT: [[TMP52:%.*]] = zext i32 [[ADD14]] to i64 +// CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP52]]) +// CHECK9-NEXT: [[TMP53:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l104.region_id, i32 3, i8** [[TMP47]], i8** [[TMP48]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK9-NEXT: [[TMP54:%.*]] = icmp ne i32 [[TMP53]], 0 +// 
CHECK9-NEXT: br i1 [[TMP54]], label [[OMP_OFFLOAD_FAILED15:%.*]], label [[OMP_OFFLOAD_CONT16:%.*]] +// CHECK9: omp_offload.failed15: +// CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l104(i64 [[TMP30]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] +// CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT16]] +// CHECK9: omp_offload.cont16: +// CHECK9-NEXT: [[TMP55:%.*]] = load i32, i32* [[M]], align 4 +// CHECK9-NEXT: store i32 [[TMP55]], i32* [[DOTCAPTURE_EXPR_17]], align 4 +// CHECK9-NEXT: [[TMP56:%.*]] = load i32, i32* [[N]], align 4 +// CHECK9-NEXT: [[CONV19:%.*]] = bitcast i64* [[N_CASTED18]] to i32* +// CHECK9-NEXT: store i32 [[TMP56]], i32* [[CONV19]], align 4 +// CHECK9-NEXT: [[TMP57:%.*]] = load i64, i64* [[N_CASTED18]], align 8 +// CHECK9-NEXT: [[TMP58:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_17]], align 4 +// CHECK9-NEXT: [[CONV20:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* +// CHECK9-NEXT: store i32 [[TMP58]], i32* [[CONV20]], align 4 +// CHECK9-NEXT: [[TMP59:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 +// CHECK9-NEXT: [[TMP60:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK9-NEXT: [[TMP61:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP62:%.*]] = bitcast i8** [[TMP61]] to i64* +// CHECK9-NEXT: store i64 [[TMP57]], i64* [[TMP62]], align 8 +// CHECK9-NEXT: [[TMP63:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP64:%.*]] = bitcast i8** [[TMP63]] to i64* +// CHECK9-NEXT: store i64 [[TMP57]], i64* [[TMP64]], align 8 +// CHECK9-NEXT: [[TMP65:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 0 +// CHECK9-NEXT: store i8* null, i8** [[TMP65]], align 8 +// CHECK9-NEXT: [[TMP66:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 1 +// CHECK9-NEXT: [[TMP67:%.*]] = bitcast i8** [[TMP66]] to i64* +// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP67]], align 8 +// CHECK9-NEXT: [[TMP68:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 1 +// CHECK9-NEXT: [[TMP69:%.*]] = bitcast i8** [[TMP68]] to i64* +// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP69]], align 8 +// CHECK9-NEXT: [[TMP70:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 1 +// CHECK9-NEXT: store i8* null, i8** [[TMP70]], align 8 +// CHECK9-NEXT: [[TMP71:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 2 +// CHECK9-NEXT: [[TMP72:%.*]] = bitcast i8** [[TMP71]] to i32** +// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP72]], align 8 +// CHECK9-NEXT: [[TMP73:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 2 +// CHECK9-NEXT: [[TMP74:%.*]] = bitcast i8** [[TMP73]] to i32** +// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP74]], align 8 +// CHECK9-NEXT: store i64 [[TMP60]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 2), align 8 +// CHECK9-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 2 +// CHECK9-NEXT: store i8* null, i8** [[TMP75]], align 8 +// CHECK9-NEXT: [[TMP76:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 3 +// CHECK9-NEXT: [[TMP77:%.*]] = bitcast i8** [[TMP76]] to i64* +// CHECK9-NEXT: store i64 [[TMP59]], i64* [[TMP77]], align 8 +// CHECK9-NEXT: [[TMP78:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* 
[[DOTOFFLOAD_PTRS22]], i32 0, i32 3 +// CHECK9-NEXT: [[TMP79:%.*]] = bitcast i8** [[TMP78]] to i64* +// CHECK9-NEXT: store i64 [[TMP59]], i64* [[TMP79]], align 8 +// CHECK9-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 3 // CHECK9-NEXT: store i8* null, i8** [[TMP80]], align 8 -// CHECK9-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 2 -// CHECK9-NEXT: [[TMP82:%.*]] = bitcast i8** [[TMP81]] to i32** -// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP82]], align 8 -// CHECK9-NEXT: [[TMP83:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 2 -// CHECK9-NEXT: [[TMP84:%.*]] = bitcast i8** [[TMP83]] to i32** -// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP84]], align 8 -// CHECK9-NEXT: [[TMP85:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 2 -// CHECK9-NEXT: store i64 [[TMP68]], i64* [[TMP85]], align 8 -// CHECK9-NEXT: [[TMP86:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 2 -// CHECK9-NEXT: store i8* null, i8** [[TMP86]], align 8 -// CHECK9-NEXT: [[TMP87:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 3 -// CHECK9-NEXT: [[TMP88:%.*]] = bitcast i8** [[TMP87]] to i64* -// CHECK9-NEXT: store i64 [[TMP67]], i64* [[TMP88]], align 8 -// CHECK9-NEXT: [[TMP89:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 3 -// CHECK9-NEXT: [[TMP90:%.*]] = bitcast i8** [[TMP89]] to i64* -// CHECK9-NEXT: store i64 [[TMP67]], i64* [[TMP90]], align 8 -// CHECK9-NEXT: [[TMP91:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 3 -// CHECK9-NEXT: store i64 4, i64* [[TMP91]], align 8 -// CHECK9-NEXT: [[TMP92:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 3 -// CHECK9-NEXT: store i8* null, i8** [[TMP92]], align 8 -// CHECK9-NEXT: [[TMP93:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP94:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP95:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP96:%.*]] = load i32, i32* [[N]], align 4 -// CHECK9-NEXT: store i32 [[TMP96]], i32* [[DOTCAPTURE_EXPR_27]], align 4 -// CHECK9-NEXT: [[TMP97:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_27]], align 4 -// CHECK9-NEXT: [[SUB29:%.*]] = sub nsw i32 [[TMP97]], 0 -// CHECK9-NEXT: [[DIV30:%.*]] = sdiv i32 [[SUB29]], 1 -// CHECK9-NEXT: [[SUB31:%.*]] = sub nsw i32 [[DIV30]], 1 -// CHECK9-NEXT: store i32 [[SUB31]], i32* [[DOTCAPTURE_EXPR_28]], align 4 -// CHECK9-NEXT: [[TMP98:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_28]], align 4 -// CHECK9-NEXT: [[ADD32:%.*]] = add nsw i32 [[TMP98]], 1 -// CHECK9-NEXT: [[TMP99:%.*]] = zext i32 [[ADD32]] to i64 -// CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP99]]) -// CHECK9-NEXT: [[TMP100:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l108.region_id, i32 4, i8** [[TMP93]], i8** [[TMP94]], i64* [[TMP95]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.7, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK9-NEXT: [[TMP101:%.*]] = icmp ne i32 [[TMP100]], 0 -// CHECK9-NEXT: 
br i1 [[TMP101]], label [[OMP_OFFLOAD_FAILED33:%.*]], label [[OMP_OFFLOAD_CONT34:%.*]] -// CHECK9: omp_offload.failed33: -// CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l108(i64 [[TMP65]], i64 [[TMP1]], i32* [[VLA]], i64 [[TMP67]]) #[[ATTR3]] -// CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT34]] -// CHECK9: omp_offload.cont34: -// CHECK9-NEXT: [[TMP102:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 -// CHECK9-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP102]]) +// CHECK9-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP83:%.*]] = load i32, i32* [[N]], align 4 +// CHECK9-NEXT: store i32 [[TMP83]], i32* [[DOTCAPTURE_EXPR_25]], align 4 +// CHECK9-NEXT: [[TMP84:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_25]], align 4 +// CHECK9-NEXT: [[SUB27:%.*]] = sub nsw i32 [[TMP84]], 0 +// CHECK9-NEXT: [[DIV28:%.*]] = sdiv i32 [[SUB27]], 1 +// CHECK9-NEXT: [[SUB29:%.*]] = sub nsw i32 [[DIV28]], 1 +// CHECK9-NEXT: store i32 [[SUB29]], i32* [[DOTCAPTURE_EXPR_26]], align 4 +// CHECK9-NEXT: [[TMP85:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_26]], align 4 +// CHECK9-NEXT: [[ADD30:%.*]] = add nsw i32 [[TMP85]], 1 +// CHECK9-NEXT: [[TMP86:%.*]] = zext i32 [[ADD30]] to i64 +// CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP86]]) +// CHECK9-NEXT: [[TMP87:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l108.region_id, i32 4, i8** [[TMP81]], i8** [[TMP82]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK9-NEXT: [[TMP88:%.*]] = icmp ne i32 [[TMP87]], 0 +// CHECK9-NEXT: br i1 [[TMP88]], label [[OMP_OFFLOAD_FAILED31:%.*]], label [[OMP_OFFLOAD_CONT32:%.*]] +// CHECK9: omp_offload.failed31: +// CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l108(i64 [[TMP57]], i64 [[TMP1]], i32* [[VLA]], i64 [[TMP59]]) #[[ATTR3]] +// CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT32]] +// CHECK9: omp_offload.cont32: +// CHECK9-NEXT: [[TMP89:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 +// CHECK9-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP89]]) // CHECK9-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK9-NEXT: [[TMP103:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK9-NEXT: call void @llvm.stackrestore(i8* [[TMP103]]) -// CHECK9-NEXT: [[TMP104:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK9-NEXT: ret i32 [[TMP104]] +// CHECK9-NEXT: [[TMP90:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK9-NEXT: call void @llvm.stackrestore(i8* [[TMP90]]) +// CHECK9-NEXT: [[TMP91:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK9-NEXT: ret i32 [[TMP91]] // // // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l100 @@ -3089,11 +3066,11 @@ // CHECK9-NEXT: [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK9-NEXT: store i32 [[TMP4]], i32* [[CONV3]], align 4 // CHECK9-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP5]]) +// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP5]]) // CHECK9-NEXT: ret void // // -// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..5 +// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..6 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK9-NEXT: entry: // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -3180,7 +3157,7 @@ // CHECK9-NEXT: [[CONV9:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK9-NEXT: store i32 [[TMP22]], i32* [[CONV9]], align 4 // CHECK9-NEXT: [[TMP23:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*, i64)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i64 [[TMP17]], i64 [[TMP19]], i64 [[TMP21]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP23]]) +// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*, i64)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i64 [[TMP17]], i64 [[TMP19]], i64 [[TMP21]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP23]]) // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK9: omp.inner.for.inc: // CHECK9-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -3222,7 +3199,7 @@ // CHECK9-NEXT: ret void // // -// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..6 +// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..7 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK9-NEXT: entry: // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -3363,7 +3340,7 @@ // CHECK9-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK9-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK9-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l81.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK9-NEXT: [[TMP7:%.*]] = call i32 
@__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l81.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK9-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK9-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK9: omp_offload.failed: @@ -3381,7 +3358,7 @@ // CHECK9-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0 // CHECK9-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0 // CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK9-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l85.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.13, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.14, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK9-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l85.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.16, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK9-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 // CHECK9-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]] // CHECK9: omp_offload.failed5: @@ -3413,7 +3390,7 @@ // CHECK9-NEXT: [[TMP31:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0 // CHECK9-NEXT: [[TMP32:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0 // CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK9-NEXT: [[TMP33:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l89.region_id, i32 2, i8** [[TMP31]], i8** [[TMP32]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.17, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.18, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK9-NEXT: [[TMP33:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l89.region_id, i32 2, i8** [[TMP31]], i8** [[TMP32]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.20, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.21, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK9-NEXT: [[TMP34:%.*]] = icmp ne i32 [[TMP33]], 0 // CHECK9-NEXT: br i1 [[TMP34]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]] // CHECK9: omp_offload.failed11: @@ -3429,11 +3406,11 @@ // CHECK9-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK9-NEXT: store [10 x i32]* [[A]], 
[10 x i32]** [[A_ADDR]], align 8 // CHECK9-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 -// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..8 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK9-NEXT: ret void // // -// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..8 +// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..10 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK9-NEXT: entry: // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -3481,7 +3458,7 @@ // CHECK9-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 // CHECK9-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 // CHECK9-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]) +// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]) // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK9: omp.inner.for.inc: // CHECK9-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -3496,7 +3473,7 @@ // CHECK9-NEXT: ret void // // -// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..9 +// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..11 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK9-NEXT: entry: // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -3579,11 +3556,11 @@ // CHECK9-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK9-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 // CHECK9-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 -// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK9-NEXT: ret void // // -// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..14 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK9-NEXT: entry: // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -3631,7 +3608,7 @@ // CHECK9-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 // CHECK9-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 // CHECK9-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..12 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]) +// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]) // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK9: omp.inner.for.inc: // CHECK9-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -3646,7 +3623,7 @@ // CHECK9-NEXT: ret void // // -// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..12 +// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..15 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK9-NEXT: entry: // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -3737,11 +3714,11 @@ // CHECK9-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK9-NEXT: store i32 [[TMP1]], i32* [[CONV1]], align 4 // CHECK9-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..15 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP2]]) +// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..18 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP2]]) // CHECK9-NEXT: ret void // // -// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..15 +// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..18 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK9-NEXT: entry: // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -3797,7 +3774,7 @@ // CHECK9-NEXT: [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK9-NEXT: store i32 [[TMP12]], i32* [[CONV2]], align 4 // CHECK9-NEXT: [[TMP13:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]]) +// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..19 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]]) // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK9: omp.inner.for.inc: // CHECK9-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -3833,7 +3810,7 @@ // CHECK9-NEXT: ret void // // -// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..16 +// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..19 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK9-NEXT: entry: // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -3934,7 +3911,6 @@ // CHECK10-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK10-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK10-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8 -// CHECK10-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 8 // CHECK10-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK10-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK10-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -3942,20 +3918,18 @@ // CHECK10-NEXT: [[DOTOFFLOAD_BASEPTRS5:%.*]] = alloca [3 x i8*], align 8 // CHECK10-NEXT: [[DOTOFFLOAD_PTRS6:%.*]] = alloca [3 x i8*], align 8 // CHECK10-NEXT: [[DOTOFFLOAD_MAPPERS7:%.*]] = alloca [3 x i8*], align 8 -// CHECK10-NEXT: [[DOTOFFLOAD_SIZES8:%.*]] = alloca [3 x i64], align 8 -// CHECK10-NEXT: [[_TMP9:%.*]] = alloca i32, align 4 +// CHECK10-NEXT: [[_TMP8:%.*]] = alloca i32, align 4 +// CHECK10-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4 // CHECK10-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4 -// CHECK10-NEXT: [[DOTCAPTURE_EXPR_11:%.*]] = alloca i32, align 4 -// CHECK10-NEXT: [[DOTCAPTURE_EXPR_18:%.*]] = alloca i32, align 4 -// CHECK10-NEXT: [[N_CASTED19:%.*]] = alloca i64, align 8 +// CHECK10-NEXT: [[DOTCAPTURE_EXPR_17:%.*]] = alloca 
i32, align 4 +// CHECK10-NEXT: [[N_CASTED18:%.*]] = alloca i64, align 8 // CHECK10-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8 -// CHECK10-NEXT: [[DOTOFFLOAD_BASEPTRS22:%.*]] = alloca [4 x i8*], align 8 -// CHECK10-NEXT: [[DOTOFFLOAD_PTRS23:%.*]] = alloca [4 x i8*], align 8 -// CHECK10-NEXT: [[DOTOFFLOAD_MAPPERS24:%.*]] = alloca [4 x i8*], align 8 -// CHECK10-NEXT: [[DOTOFFLOAD_SIZES25:%.*]] = alloca [4 x i64], align 8 -// CHECK10-NEXT: [[_TMP26:%.*]] = alloca i32, align 4 -// CHECK10-NEXT: [[DOTCAPTURE_EXPR_27:%.*]] = alloca i32, align 4 -// CHECK10-NEXT: [[DOTCAPTURE_EXPR_28:%.*]] = alloca i32, align 4 +// CHECK10-NEXT: [[DOTOFFLOAD_BASEPTRS21:%.*]] = alloca [4 x i8*], align 8 +// CHECK10-NEXT: [[DOTOFFLOAD_PTRS22:%.*]] = alloca [4 x i8*], align 8 +// CHECK10-NEXT: [[DOTOFFLOAD_MAPPERS23:%.*]] = alloca [4 x i8*], align 8 +// CHECK10-NEXT: [[_TMP24:%.*]] = alloca i32, align 4 +// CHECK10-NEXT: [[DOTCAPTURE_EXPR_25:%.*]] = alloca i32, align 4 +// CHECK10-NEXT: [[DOTCAPTURE_EXPR_26:%.*]] = alloca i32, align 4 // CHECK10-NEXT: store i32 0, i32* [[RETVAL]], align 4 // CHECK10-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 // CHECK10-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 8 @@ -3978,186 +3952,166 @@ // CHECK10-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK10-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i64* // CHECK10-NEXT: store i64 [[TMP4]], i64* [[TMP9]], align 8 -// CHECK10-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK10-NEXT: store i64 4, i64* [[TMP10]], align 8 -// CHECK10-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK10-NEXT: store i8* null, i8** [[TMP11]], align 8 -// CHECK10-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK10-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64* -// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP13]], align 8 -// CHECK10-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK10-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64* -// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP15]], align 8 -// CHECK10-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK10-NEXT: store i64 8, i64* [[TMP16]], align 8 -// CHECK10-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK10-NEXT: store i8* null, i8** [[TMP17]], align 8 -// CHECK10-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK10-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK10-NEXT: store i8* null, i8** [[TMP10]], align 8 +// CHECK10-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK10-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i64* +// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP12]], align 8 +// CHECK10-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK10-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i64* +// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP14]], align 8 +// CHECK10-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// 
CHECK10-NEXT: store i8* null, i8** [[TMP15]], align 8 +// CHECK10-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK10-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** +// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 8 +// CHECK10-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK10-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** // CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 8 -// CHECK10-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK10-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** -// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 8 -// CHECK10-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK10-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 8 -// CHECK10-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK10-NEXT: store i8* null, i8** [[TMP23]], align 8 -// CHECK10-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4 -// CHECK10-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK10-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK10-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 +// CHECK10-NEXT: store i64 [[TMP5]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 2), align 8 +// CHECK10-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK10-NEXT: store i8* null, i8** [[TMP20]], align 8 +// CHECK10-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP23:%.*]] = load i32, i32* [[N]], align 4 +// CHECK10-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK10-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK10-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK10-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK10-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK10-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK10-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK10-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1 -// CHECK10-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 -// CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP30]]) -// CHECK10-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l100.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK10-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 -// CHECK10-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label 
[[OMP_OFFLOAD_CONT:%.*]] +// CHECK10-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK10-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], 1 +// CHECK10-NEXT: [[TMP26:%.*]] = zext i32 [[ADD]] to i64 +// CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP26]]) +// CHECK10-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l100.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK10-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK10-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK10: omp_offload.failed: // CHECK10-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l100(i64 [[TMP4]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK10-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK10: omp_offload.cont: -// CHECK10-NEXT: [[TMP33:%.*]] = load i32, i32* [[N]], align 4 +// CHECK10-NEXT: [[TMP29:%.*]] = load i32, i32* [[N]], align 4 // CHECK10-NEXT: [[CONV4:%.*]] = bitcast i64* [[N_CASTED3]] to i32* -// CHECK10-NEXT: store i32 [[TMP33]], i32* [[CONV4]], align 4 -// CHECK10-NEXT: [[TMP34:%.*]] = load i64, i64* [[N_CASTED3]], align 8 -// CHECK10-NEXT: [[TMP35:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK10-NEXT: [[TMP36:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i64* -// CHECK10-NEXT: store i64 [[TMP34]], i64* [[TMP37]], align 8 -// CHECK10-NEXT: [[TMP38:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i64* -// CHECK10-NEXT: store i64 [[TMP34]], i64* [[TMP39]], align 8 -// CHECK10-NEXT: [[TMP40:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 0 -// CHECK10-NEXT: store i64 4, i64* [[TMP40]], align 8 -// CHECK10-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 0 +// CHECK10-NEXT: store i32 [[TMP29]], i32* [[CONV4]], align 4 +// CHECK10-NEXT: [[TMP30:%.*]] = load i64, i64* [[N_CASTED3]], align 8 +// CHECK10-NEXT: [[TMP31:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK10-NEXT: [[TMP32:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i64* +// CHECK10-NEXT: store i64 [[TMP30]], i64* [[TMP33]], align 8 +// CHECK10-NEXT: [[TMP34:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i64* +// CHECK10-NEXT: store i64 [[TMP30]], i64* [[TMP35]], align 8 +// CHECK10-NEXT: [[TMP36:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 0 +// CHECK10-NEXT: store i8* null, i8** [[TMP36]], align 8 +// CHECK10-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1 +// CHECK10-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i64* +// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP38]], align 8 +// CHECK10-NEXT: [[TMP39:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 1 +// 
CHECK10-NEXT: [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i64*
+// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP40]], align 8
+// CHECK10-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 1
 // CHECK10-NEXT: store i8* null, i8** [[TMP41]], align 8
-// CHECK10-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1
-// CHECK10-NEXT: [[TMP43:%.*]] = bitcast i8** [[TMP42]] to i64*
-// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP43]], align 8
-// CHECK10-NEXT: [[TMP44:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 1
-// CHECK10-NEXT: [[TMP45:%.*]] = bitcast i8** [[TMP44]] to i64*
-// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP45]], align 8
-// CHECK10-NEXT: [[TMP46:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 1
-// CHECK10-NEXT: store i64 8, i64* [[TMP46]], align 8
-// CHECK10-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 1
-// CHECK10-NEXT: store i8* null, i8** [[TMP47]], align 8
-// CHECK10-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 2
-// CHECK10-NEXT: [[TMP49:%.*]] = bitcast i8** [[TMP48]] to i32**
-// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP49]], align 8
-// CHECK10-NEXT: [[TMP50:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 2
-// CHECK10-NEXT: [[TMP51:%.*]] = bitcast i8** [[TMP50]] to i32**
-// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP51]], align 8
-// CHECK10-NEXT: [[TMP52:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 2
-// CHECK10-NEXT: store i64 [[TMP35]], i64* [[TMP52]], align 8
-// CHECK10-NEXT: [[TMP53:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 2
-// CHECK10-NEXT: store i8* null, i8** [[TMP53]], align 8
-// CHECK10-NEXT: [[TMP54:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0
-// CHECK10-NEXT: [[TMP55:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0
-// CHECK10-NEXT: [[TMP56:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 0
-// CHECK10-NEXT: [[TMP57:%.*]] = load i32, i32* [[N]], align 4
-// CHECK10-NEXT: store i32 [[TMP57]], i32* [[DOTCAPTURE_EXPR_10]], align 4
-// CHECK10-NEXT: [[TMP58:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4
-// CHECK10-NEXT: [[SUB12:%.*]] = sub nsw i32 [[TMP58]], 0
-// CHECK10-NEXT: [[DIV13:%.*]] = sdiv i32 [[SUB12]], 1
-// CHECK10-NEXT: [[SUB14:%.*]] = sub nsw i32 [[DIV13]], 1
-// CHECK10-NEXT: store i32 [[SUB14]], i32* [[DOTCAPTURE_EXPR_11]], align 4
-// CHECK10-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_11]], align 4
-// CHECK10-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP59]], 1
-// CHECK10-NEXT: [[TMP60:%.*]] = zext i32 [[ADD15]] to i64
-// CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP60]])
-// CHECK10-NEXT: [[TMP61:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l104.region_id, i32 3, i8** [[TMP54]], i8** [[TMP55]], i64* [[TMP56]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
-// CHECK10-NEXT: [[TMP62:%.*]] = icmp ne i32 [[TMP61]], 0
-// CHECK10-NEXT: br i1 [[TMP62]], label [[OMP_OFFLOAD_FAILED16:%.*]], label [[OMP_OFFLOAD_CONT17:%.*]]
-// CHECK10: omp_offload.failed16:
-// CHECK10-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l104(i64 [[TMP34]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]]
-// CHECK10-NEXT: br label [[OMP_OFFLOAD_CONT17]]
-// CHECK10: omp_offload.cont17:
-// CHECK10-NEXT: [[TMP63:%.*]] = load i32, i32* [[M]], align 4
-// CHECK10-NEXT: store i32 [[TMP63]], i32* [[DOTCAPTURE_EXPR_18]], align 4
-// CHECK10-NEXT: [[TMP64:%.*]] = load i32, i32* [[N]], align 4
-// CHECK10-NEXT: [[CONV20:%.*]] = bitcast i64* [[N_CASTED19]] to i32*
-// CHECK10-NEXT: store i32 [[TMP64]], i32* [[CONV20]], align 4
-// CHECK10-NEXT: [[TMP65:%.*]] = load i64, i64* [[N_CASTED19]], align 8
-// CHECK10-NEXT: [[TMP66:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_18]], align 4
-// CHECK10-NEXT: [[CONV21:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
-// CHECK10-NEXT: store i32 [[TMP66]], i32* [[CONV21]], align 4
-// CHECK10-NEXT: [[TMP67:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
-// CHECK10-NEXT: [[TMP68:%.*]] = mul nuw i64 [[TMP1]], 4
-// CHECK10-NEXT: [[TMP69:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 0
-// CHECK10-NEXT: [[TMP70:%.*]] = bitcast i8** [[TMP69]] to i64*
-// CHECK10-NEXT: store i64 [[TMP65]], i64* [[TMP70]], align 8
-// CHECK10-NEXT: [[TMP71:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 0
-// CHECK10-NEXT: [[TMP72:%.*]] = bitcast i8** [[TMP71]] to i64*
-// CHECK10-NEXT: store i64 [[TMP65]], i64* [[TMP72]], align 8
-// CHECK10-NEXT: [[TMP73:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 0
-// CHECK10-NEXT: store i64 4, i64* [[TMP73]], align 8
-// CHECK10-NEXT: [[TMP74:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 0
-// CHECK10-NEXT: store i8* null, i8** [[TMP74]], align 8
-// CHECK10-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 1
-// CHECK10-NEXT: [[TMP76:%.*]] = bitcast i8** [[TMP75]] to i64*
-// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP76]], align 8
-// CHECK10-NEXT: [[TMP77:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 1
-// CHECK10-NEXT: [[TMP78:%.*]] = bitcast i8** [[TMP77]] to i64*
-// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP78]], align 8
-// CHECK10-NEXT: [[TMP79:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 1
-// CHECK10-NEXT: store i64 8, i64* [[TMP79]], align 8
-// CHECK10-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 1
+// CHECK10-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 2
+// CHECK10-NEXT: [[TMP43:%.*]] = bitcast i8** [[TMP42]] to i32**
+// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP43]], align 8
+// CHECK10-NEXT: [[TMP44:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 2
+// CHECK10-NEXT: [[TMP45:%.*]] = bitcast i8** [[TMP44]] to i32**
+// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP45]], align 8
+// CHECK10-NEXT: store i64 [[TMP31]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 2), align 8
+// CHECK10-NEXT: [[TMP46:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 2
+// CHECK10-NEXT: store i8* null, i8** [[TMP46]], align 8
+// CHECK10-NEXT: [[TMP47:%.*]] = 
getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP49:%.*]] = load i32, i32* [[N]], align 4 +// CHECK10-NEXT: store i32 [[TMP49]], i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK10-NEXT: [[TMP50:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK10-NEXT: [[SUB11:%.*]] = sub nsw i32 [[TMP50]], 0 +// CHECK10-NEXT: [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1 +// CHECK10-NEXT: [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1 +// CHECK10-NEXT: store i32 [[SUB13]], i32* [[DOTCAPTURE_EXPR_10]], align 4 +// CHECK10-NEXT: [[TMP51:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 +// CHECK10-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP51]], 1 +// CHECK10-NEXT: [[TMP52:%.*]] = zext i32 [[ADD14]] to i64 +// CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP52]]) +// CHECK10-NEXT: [[TMP53:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l104.region_id, i32 3, i8** [[TMP47]], i8** [[TMP48]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK10-NEXT: [[TMP54:%.*]] = icmp ne i32 [[TMP53]], 0 +// CHECK10-NEXT: br i1 [[TMP54]], label [[OMP_OFFLOAD_FAILED15:%.*]], label [[OMP_OFFLOAD_CONT16:%.*]] +// CHECK10: omp_offload.failed15: +// CHECK10-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l104(i64 [[TMP30]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] +// CHECK10-NEXT: br label [[OMP_OFFLOAD_CONT16]] +// CHECK10: omp_offload.cont16: +// CHECK10-NEXT: [[TMP55:%.*]] = load i32, i32* [[M]], align 4 +// CHECK10-NEXT: store i32 [[TMP55]], i32* [[DOTCAPTURE_EXPR_17]], align 4 +// CHECK10-NEXT: [[TMP56:%.*]] = load i32, i32* [[N]], align 4 +// CHECK10-NEXT: [[CONV19:%.*]] = bitcast i64* [[N_CASTED18]] to i32* +// CHECK10-NEXT: store i32 [[TMP56]], i32* [[CONV19]], align 4 +// CHECK10-NEXT: [[TMP57:%.*]] = load i64, i64* [[N_CASTED18]], align 8 +// CHECK10-NEXT: [[TMP58:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_17]], align 4 +// CHECK10-NEXT: [[CONV20:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* +// CHECK10-NEXT: store i32 [[TMP58]], i32* [[CONV20]], align 4 +// CHECK10-NEXT: [[TMP59:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 +// CHECK10-NEXT: [[TMP60:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK10-NEXT: [[TMP61:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP62:%.*]] = bitcast i8** [[TMP61]] to i64* +// CHECK10-NEXT: store i64 [[TMP57]], i64* [[TMP62]], align 8 +// CHECK10-NEXT: [[TMP63:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP64:%.*]] = bitcast i8** [[TMP63]] to i64* +// CHECK10-NEXT: store i64 [[TMP57]], i64* [[TMP64]], align 8 +// CHECK10-NEXT: [[TMP65:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 0 +// CHECK10-NEXT: store i8* null, i8** [[TMP65]], align 8 +// CHECK10-NEXT: [[TMP66:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 1 +// CHECK10-NEXT: [[TMP67:%.*]] = bitcast i8** [[TMP66]] to i64* +// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP67]], align 8 +// CHECK10-NEXT: [[TMP68:%.*]] = 
getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 1 +// CHECK10-NEXT: [[TMP69:%.*]] = bitcast i8** [[TMP68]] to i64* +// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP69]], align 8 +// CHECK10-NEXT: [[TMP70:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 1 +// CHECK10-NEXT: store i8* null, i8** [[TMP70]], align 8 +// CHECK10-NEXT: [[TMP71:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 2 +// CHECK10-NEXT: [[TMP72:%.*]] = bitcast i8** [[TMP71]] to i32** +// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP72]], align 8 +// CHECK10-NEXT: [[TMP73:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 2 +// CHECK10-NEXT: [[TMP74:%.*]] = bitcast i8** [[TMP73]] to i32** +// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP74]], align 8 +// CHECK10-NEXT: store i64 [[TMP60]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 2), align 8 +// CHECK10-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 2 +// CHECK10-NEXT: store i8* null, i8** [[TMP75]], align 8 +// CHECK10-NEXT: [[TMP76:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 3 +// CHECK10-NEXT: [[TMP77:%.*]] = bitcast i8** [[TMP76]] to i64* +// CHECK10-NEXT: store i64 [[TMP59]], i64* [[TMP77]], align 8 +// CHECK10-NEXT: [[TMP78:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 3 +// CHECK10-NEXT: [[TMP79:%.*]] = bitcast i8** [[TMP78]] to i64* +// CHECK10-NEXT: store i64 [[TMP59]], i64* [[TMP79]], align 8 +// CHECK10-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 3 // CHECK10-NEXT: store i8* null, i8** [[TMP80]], align 8 -// CHECK10-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 2 -// CHECK10-NEXT: [[TMP82:%.*]] = bitcast i8** [[TMP81]] to i32** -// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP82]], align 8 -// CHECK10-NEXT: [[TMP83:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 2 -// CHECK10-NEXT: [[TMP84:%.*]] = bitcast i8** [[TMP83]] to i32** -// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP84]], align 8 -// CHECK10-NEXT: [[TMP85:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 2 -// CHECK10-NEXT: store i64 [[TMP68]], i64* [[TMP85]], align 8 -// CHECK10-NEXT: [[TMP86:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 2 -// CHECK10-NEXT: store i8* null, i8** [[TMP86]], align 8 -// CHECK10-NEXT: [[TMP87:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 3 -// CHECK10-NEXT: [[TMP88:%.*]] = bitcast i8** [[TMP87]] to i64* -// CHECK10-NEXT: store i64 [[TMP67]], i64* [[TMP88]], align 8 -// CHECK10-NEXT: [[TMP89:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 3 -// CHECK10-NEXT: [[TMP90:%.*]] = bitcast i8** [[TMP89]] to i64* -// CHECK10-NEXT: store i64 [[TMP67]], i64* [[TMP90]], align 8 -// CHECK10-NEXT: [[TMP91:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 3 -// CHECK10-NEXT: store i64 4, i64* [[TMP91]], align 8 -// CHECK10-NEXT: [[TMP92:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 3 -// CHECK10-NEXT: store i8* null, i8** [[TMP92]], align 8 -// CHECK10-NEXT: 
[[TMP93:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP94:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP95:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP96:%.*]] = load i32, i32* [[N]], align 4 -// CHECK10-NEXT: store i32 [[TMP96]], i32* [[DOTCAPTURE_EXPR_27]], align 4 -// CHECK10-NEXT: [[TMP97:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_27]], align 4 -// CHECK10-NEXT: [[SUB29:%.*]] = sub nsw i32 [[TMP97]], 0 -// CHECK10-NEXT: [[DIV30:%.*]] = sdiv i32 [[SUB29]], 1 -// CHECK10-NEXT: [[SUB31:%.*]] = sub nsw i32 [[DIV30]], 1 -// CHECK10-NEXT: store i32 [[SUB31]], i32* [[DOTCAPTURE_EXPR_28]], align 4 -// CHECK10-NEXT: [[TMP98:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_28]], align 4 -// CHECK10-NEXT: [[ADD32:%.*]] = add nsw i32 [[TMP98]], 1 -// CHECK10-NEXT: [[TMP99:%.*]] = zext i32 [[ADD32]] to i64 -// CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP99]]) -// CHECK10-NEXT: [[TMP100:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l108.region_id, i32 4, i8** [[TMP93]], i8** [[TMP94]], i64* [[TMP95]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.7, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK10-NEXT: [[TMP101:%.*]] = icmp ne i32 [[TMP100]], 0 -// CHECK10-NEXT: br i1 [[TMP101]], label [[OMP_OFFLOAD_FAILED33:%.*]], label [[OMP_OFFLOAD_CONT34:%.*]] -// CHECK10: omp_offload.failed33: -// CHECK10-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l108(i64 [[TMP65]], i64 [[TMP1]], i32* [[VLA]], i64 [[TMP67]]) #[[ATTR3]] -// CHECK10-NEXT: br label [[OMP_OFFLOAD_CONT34]] -// CHECK10: omp_offload.cont34: -// CHECK10-NEXT: [[TMP102:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 -// CHECK10-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP102]]) +// CHECK10-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP83:%.*]] = load i32, i32* [[N]], align 4 +// CHECK10-NEXT: store i32 [[TMP83]], i32* [[DOTCAPTURE_EXPR_25]], align 4 +// CHECK10-NEXT: [[TMP84:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_25]], align 4 +// CHECK10-NEXT: [[SUB27:%.*]] = sub nsw i32 [[TMP84]], 0 +// CHECK10-NEXT: [[DIV28:%.*]] = sdiv i32 [[SUB27]], 1 +// CHECK10-NEXT: [[SUB29:%.*]] = sub nsw i32 [[DIV28]], 1 +// CHECK10-NEXT: store i32 [[SUB29]], i32* [[DOTCAPTURE_EXPR_26]], align 4 +// CHECK10-NEXT: [[TMP85:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_26]], align 4 +// CHECK10-NEXT: [[ADD30:%.*]] = add nsw i32 [[TMP85]], 1 +// CHECK10-NEXT: [[TMP86:%.*]] = zext i32 [[ADD30]] to i64 +// CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP86]]) +// CHECK10-NEXT: [[TMP87:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l108.region_id, i32 4, i8** [[TMP81]], i8** [[TMP82]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK10-NEXT: 
[[TMP88:%.*]] = icmp ne i32 [[TMP87]], 0 +// CHECK10-NEXT: br i1 [[TMP88]], label [[OMP_OFFLOAD_FAILED31:%.*]], label [[OMP_OFFLOAD_CONT32:%.*]] +// CHECK10: omp_offload.failed31: +// CHECK10-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l108(i64 [[TMP57]], i64 [[TMP1]], i32* [[VLA]], i64 [[TMP59]]) #[[ATTR3]] +// CHECK10-NEXT: br label [[OMP_OFFLOAD_CONT32]] +// CHECK10: omp_offload.cont32: +// CHECK10-NEXT: [[TMP89:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 +// CHECK10-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP89]]) // CHECK10-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK10-NEXT: [[TMP103:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK10-NEXT: call void @llvm.stackrestore(i8* [[TMP103]]) -// CHECK10-NEXT: [[TMP104:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK10-NEXT: ret i32 [[TMP104]] +// CHECK10-NEXT: [[TMP90:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK10-NEXT: call void @llvm.stackrestore(i8* [[TMP90]]) +// CHECK10-NEXT: [[TMP91:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK10-NEXT: ret i32 [[TMP91]] // // // CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l100 @@ -4629,11 +4583,11 @@ // CHECK10-NEXT: [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK10-NEXT: store i32 [[TMP4]], i32* [[CONV3]], align 4 // CHECK10-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP5]]) +// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP5]]) // CHECK10-NEXT: ret void // // -// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..5 +// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..6 // CHECK10-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK10-NEXT: entry: // CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -4720,7 +4674,7 @@ // CHECK10-NEXT: [[CONV9:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK10-NEXT: store i32 [[TMP22]], i32* [[CONV9]], align 4 // CHECK10-NEXT: [[TMP23:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*, i64)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i64 [[TMP17]], i64 [[TMP19]], i64 [[TMP21]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP23]]) +// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*, i64)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i64 [[TMP17]], i64 [[TMP19]], i64 [[TMP21]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP23]]) // CHECK10-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK10: omp.inner.for.inc: // CHECK10-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -4762,7 +4716,7 @@ // CHECK10-NEXT: ret void // // -// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..6 +// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..7 // CHECK10-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK10-NEXT: entry: // CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -4903,7 +4857,7 @@ // CHECK10-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK10-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK10-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l81.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK10-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l81.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK10-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK10-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK10: omp_offload.failed: @@ -4921,7 +4875,7 @@ // CHECK10-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0 // CHECK10-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0 // CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK10-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l85.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.13, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.14, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK10-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l85.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.16, i32 0, i32 0), i64* 
getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK10-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 // CHECK10-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]] // CHECK10: omp_offload.failed5: @@ -4953,7 +4907,7 @@ // CHECK10-NEXT: [[TMP31:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0 // CHECK10-NEXT: [[TMP32:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0 // CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK10-NEXT: [[TMP33:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l89.region_id, i32 2, i8** [[TMP31]], i8** [[TMP32]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.17, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.18, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK10-NEXT: [[TMP33:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l89.region_id, i32 2, i8** [[TMP31]], i8** [[TMP32]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.20, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.21, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK10-NEXT: [[TMP34:%.*]] = icmp ne i32 [[TMP33]], 0 // CHECK10-NEXT: br i1 [[TMP34]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]] // CHECK10: omp_offload.failed11: @@ -4969,11 +4923,11 @@ // CHECK10-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK10-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 // CHECK10-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 -// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..8 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK10-NEXT: ret void // // -// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..8 +// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..10 // CHECK10-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK10-NEXT: entry: // CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -5021,7 +4975,7 @@ // CHECK10-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 // CHECK10-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 // CHECK10-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]) +// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]) // CHECK10-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK10: omp.inner.for.inc: // CHECK10-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -5036,7 +4990,7 @@ // CHECK10-NEXT: ret void // // -// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..9 +// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..11 // CHECK10-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK10-NEXT: entry: // CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -5119,11 +5073,11 @@ // CHECK10-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK10-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 // CHECK10-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 -// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK10-NEXT: ret void // // -// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..14 // CHECK10-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK10-NEXT: entry: // CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -5171,7 +5125,7 @@ // CHECK10-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 // CHECK10-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 // CHECK10-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..12 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]) +// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]) // CHECK10-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK10: omp.inner.for.inc: // CHECK10-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -5186,7 +5140,7 @@ // CHECK10-NEXT: ret void // // -// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..12 +// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..15 // CHECK10-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK10-NEXT: entry: // CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -5277,11 +5231,11 @@ // CHECK10-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK10-NEXT: store i32 [[TMP1]], i32* [[CONV1]], align 4 // CHECK10-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..15 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP2]]) +// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..18 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP2]]) // CHECK10-NEXT: ret void // // -// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..15 +// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..18 // CHECK10-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK10-NEXT: entry: // CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -5337,7 +5291,7 @@ // CHECK10-NEXT: [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK10-NEXT: store i32 [[TMP12]], i32* [[CONV2]], align 4 // CHECK10-NEXT: [[TMP13:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]]) +// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..19 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]]) // CHECK10-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK10: omp.inner.for.inc: // CHECK10-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -5373,7 +5327,7 @@ // CHECK10-NEXT: ret void // // -// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..16 +// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..19 // CHECK10-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK10-NEXT: entry: // CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -5474,7 +5428,6 @@ // CHECK11-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK11-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK11-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4 -// CHECK11-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 4 // CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -5482,20 +5435,18 @@ // CHECK11-NEXT: [[DOTOFFLOAD_BASEPTRS4:%.*]] = alloca [3 x i8*], align 4 // CHECK11-NEXT: [[DOTOFFLOAD_PTRS5:%.*]] = alloca [3 x i8*], align 4 // CHECK11-NEXT: [[DOTOFFLOAD_MAPPERS6:%.*]] = alloca [3 x i8*], align 4 -// CHECK11-NEXT: [[DOTOFFLOAD_SIZES7:%.*]] = alloca [3 x i64], align 4 -// CHECK11-NEXT: [[_TMP8:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[_TMP7:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTCAPTURE_EXPR_8:%.*]] = alloca i32, align 4 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTCAPTURE_EXPR_17:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[N_CASTED18:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTCAPTURE_EXPR_16:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[N_CASTED17:%.*]] = alloca i32, align 4 // CHECK11-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOFFLOAD_BASEPTRS19:%.*]] = alloca [4 x i8*], align 4 -// CHECK11-NEXT: [[DOTOFFLOAD_PTRS20:%.*]] = alloca [4 x i8*], align 4 -// CHECK11-NEXT: [[DOTOFFLOAD_MAPPERS21:%.*]] = alloca [4 x i8*], align 4 -// CHECK11-NEXT: [[DOTOFFLOAD_SIZES22:%.*]] = alloca [4 x i64], align 4 -// CHECK11-NEXT: [[_TMP23:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTCAPTURE_EXPR_24:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTCAPTURE_EXPR_25:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOFFLOAD_BASEPTRS18:%.*]] = alloca [4 x i8*], align 4 +// CHECK11-NEXT: [[DOTOFFLOAD_PTRS19:%.*]] = alloca [4 x i8*], align 4 +// CHECK11-NEXT: [[DOTOFFLOAD_MAPPERS20:%.*]] = alloca [4 x i8*], align 4 +// CHECK11-NEXT: [[_TMP21:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTCAPTURE_EXPR_22:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTCAPTURE_EXPR_23:%.*]] = alloca i32, align 4 // CHECK11-NEXT: store i32 0, i32* [[RETVAL]], align 4 // CHECK11-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 // CHECK11-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 4 @@ -5517,185 +5468,165 @@ // CHECK11-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x 
i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK11-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i32* // CHECK11-NEXT: store i32 [[TMP3]], i32* [[TMP9]], align 4 -// CHECK11-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK11-NEXT: store i64 4, i64* [[TMP10]], align 4 -// CHECK11-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK11-NEXT: store i8* null, i8** [[TMP11]], align 4 -// CHECK11-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK11-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32* -// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP13]], align 4 -// CHECK11-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK11-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32* -// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP15]], align 4 -// CHECK11-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK11-NEXT: store i64 4, i64* [[TMP16]], align 4 -// CHECK11-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK11-NEXT: store i8* null, i8** [[TMP17]], align 4 -// CHECK11-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK11-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK11-NEXT: store i8* null, i8** [[TMP10]], align 4 +// CHECK11-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK11-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i32* +// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP12]], align 4 +// CHECK11-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK11-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i32* +// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP14]], align 4 +// CHECK11-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK11-NEXT: store i8* null, i8** [[TMP15]], align 4 +// CHECK11-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK11-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** +// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 4 +// CHECK11-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK11-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** // CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 4 -// CHECK11-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK11-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** -// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 4 -// CHECK11-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK11-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 4 -// CHECK11-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK11-NEXT: store i8* null, i8** [[TMP23]], align 4 -// CHECK11-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], 
[3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4 -// CHECK11-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK11-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 +// CHECK11-NEXT: store i64 [[TMP5]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 2), align 4 +// CHECK11-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK11-NEXT: store i8* null, i8** [[TMP20]], align 4 +// CHECK11-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP23:%.*]] = load i32, i32* [[N]], align 4 +// CHECK11-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK11-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK11-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK11-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK11-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK11-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1 -// CHECK11-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 -// CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP30]]) -// CHECK11-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l100.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK11-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 -// CHECK11-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK11-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], 1 +// CHECK11-NEXT: [[TMP26:%.*]] = zext i32 [[ADD]] to i64 +// CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP26]]) +// CHECK11-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l100.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK11-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK11-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK11: omp_offload.failed: // CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l100(i32 [[TMP3]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK11: omp_offload.cont: -// CHECK11-NEXT: [[TMP33:%.*]] = load i32, i32* [[N]], align 4 -// CHECK11-NEXT: store i32 [[TMP33]], i32* [[N_CASTED3]], align 4 -// CHECK11-NEXT: 
[[TMP34:%.*]] = load i32, i32* [[N_CASTED3]], align 4 -// CHECK11-NEXT: [[TMP35:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK11-NEXT: [[TMP36:%.*]] = sext i32 [[TMP35]] to i64 -// CHECK11-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i32* -// CHECK11-NEXT: store i32 [[TMP34]], i32* [[TMP38]], align 4 -// CHECK11-NEXT: [[TMP39:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i32* -// CHECK11-NEXT: store i32 [[TMP34]], i32* [[TMP40]], align 4 -// CHECK11-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 0 -// CHECK11-NEXT: store i64 4, i64* [[TMP41]], align 4 -// CHECK11-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP29:%.*]] = load i32, i32* [[N]], align 4 +// CHECK11-NEXT: store i32 [[TMP29]], i32* [[N_CASTED3]], align 4 +// CHECK11-NEXT: [[TMP30:%.*]] = load i32, i32* [[N_CASTED3]], align 4 +// CHECK11-NEXT: [[TMP31:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK11-NEXT: [[TMP32:%.*]] = sext i32 [[TMP31]] to i64 +// CHECK11-NEXT: [[TMP33:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i32* +// CHECK11-NEXT: store i32 [[TMP30]], i32* [[TMP34]], align 4 +// CHECK11-NEXT: [[TMP35:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP36:%.*]] = bitcast i8** [[TMP35]] to i32* +// CHECK11-NEXT: store i32 [[TMP30]], i32* [[TMP36]], align 4 +// CHECK11-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0 +// CHECK11-NEXT: store i8* null, i8** [[TMP37]], align 4 +// CHECK11-NEXT: [[TMP38:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1 +// CHECK11-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i32* +// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP39]], align 4 +// CHECK11-NEXT: [[TMP40:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 1 +// CHECK11-NEXT: [[TMP41:%.*]] = bitcast i8** [[TMP40]] to i32* +// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP41]], align 4 +// CHECK11-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1 // CHECK11-NEXT: store i8* null, i8** [[TMP42]], align 4 -// CHECK11-NEXT: [[TMP43:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1 -// CHECK11-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32* -// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP44]], align 4 -// CHECK11-NEXT: [[TMP45:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 1 -// CHECK11-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32* -// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP46]], align 4 -// CHECK11-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 1 -// CHECK11-NEXT: store i64 4, i64* [[TMP47]], align 4 -// CHECK11-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1 -// CHECK11-NEXT: store i8* null, i8** [[TMP48]], align 4 -// CHECK11-NEXT: [[TMP49:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2 -// 
CHECK11-NEXT: [[TMP50:%.*]] = bitcast i8** [[TMP49]] to i32** -// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP50]], align 4 -// CHECK11-NEXT: [[TMP51:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 2 -// CHECK11-NEXT: [[TMP52:%.*]] = bitcast i8** [[TMP51]] to i32** -// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP52]], align 4 -// CHECK11-NEXT: [[TMP53:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 2 -// CHECK11-NEXT: store i64 [[TMP36]], i64* [[TMP53]], align 4 -// CHECK11-NEXT: [[TMP54:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 2 -// CHECK11-NEXT: store i8* null, i8** [[TMP54]], align 4 -// CHECK11-NEXT: [[TMP55:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP56:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP57:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP58:%.*]] = load i32, i32* [[N]], align 4 -// CHECK11-NEXT: store i32 [[TMP58]], i32* [[DOTCAPTURE_EXPR_9]], align 4 -// CHECK11-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 -// CHECK11-NEXT: [[SUB11:%.*]] = sub nsw i32 [[TMP59]], 0 -// CHECK11-NEXT: [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1 -// CHECK11-NEXT: [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1 -// CHECK11-NEXT: store i32 [[SUB13]], i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK11-NEXT: [[TMP60:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK11-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP60]], 1 -// CHECK11-NEXT: [[TMP61:%.*]] = zext i32 [[ADD14]] to i64 -// CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP61]]) -// CHECK11-NEXT: [[TMP62:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l104.region_id, i32 3, i8** [[TMP55]], i8** [[TMP56]], i64* [[TMP57]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK11-NEXT: [[TMP63:%.*]] = icmp ne i32 [[TMP62]], 0 -// CHECK11-NEXT: br i1 [[TMP63]], label [[OMP_OFFLOAD_FAILED15:%.*]], label [[OMP_OFFLOAD_CONT16:%.*]] -// CHECK11: omp_offload.failed15: -// CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l104(i32 [[TMP34]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] -// CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT16]] -// CHECK11: omp_offload.cont16: -// CHECK11-NEXT: [[TMP64:%.*]] = load i32, i32* [[M]], align 4 -// CHECK11-NEXT: store i32 [[TMP64]], i32* [[DOTCAPTURE_EXPR_17]], align 4 -// CHECK11-NEXT: [[TMP65:%.*]] = load i32, i32* [[N]], align 4 -// CHECK11-NEXT: store i32 [[TMP65]], i32* [[N_CASTED18]], align 4 -// CHECK11-NEXT: [[TMP66:%.*]] = load i32, i32* [[N_CASTED18]], align 4 -// CHECK11-NEXT: [[TMP67:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_17]], align 4 -// CHECK11-NEXT: store i32 [[TMP67]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK11-NEXT: [[TMP68:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK11-NEXT: [[TMP69:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK11-NEXT: [[TMP70:%.*]] = sext i32 [[TMP69]] to i64 -// CHECK11-NEXT: [[TMP71:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP72:%.*]] = bitcast i8** [[TMP71]] to i32* -// CHECK11-NEXT: store i32 
[[TMP66]], i32* [[TMP72]], align 4 -// CHECK11-NEXT: [[TMP73:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP74:%.*]] = bitcast i8** [[TMP73]] to i32* -// CHECK11-NEXT: store i32 [[TMP66]], i32* [[TMP74]], align 4 -// CHECK11-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 0 -// CHECK11-NEXT: store i64 4, i64* [[TMP75]], align 4 -// CHECK11-NEXT: [[TMP76:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 0 -// CHECK11-NEXT: store i8* null, i8** [[TMP76]], align 4 -// CHECK11-NEXT: [[TMP77:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 1 -// CHECK11-NEXT: [[TMP78:%.*]] = bitcast i8** [[TMP77]] to i32* -// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP78]], align 4 -// CHECK11-NEXT: [[TMP79:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 1 -// CHECK11-NEXT: [[TMP80:%.*]] = bitcast i8** [[TMP79]] to i32* -// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP80]], align 4 -// CHECK11-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 1 -// CHECK11-NEXT: store i64 4, i64* [[TMP81]], align 4 -// CHECK11-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 1 +// CHECK11-NEXT: [[TMP43:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2 +// CHECK11-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32** +// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP44]], align 4 +// CHECK11-NEXT: [[TMP45:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 2 +// CHECK11-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32** +// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP46]], align 4 +// CHECK11-NEXT: store i64 [[TMP32]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 2), align 4 +// CHECK11-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 2 +// CHECK11-NEXT: store i8* null, i8** [[TMP47]], align 4 +// CHECK11-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP49:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP50:%.*]] = load i32, i32* [[N]], align 4 +// CHECK11-NEXT: store i32 [[TMP50]], i32* [[DOTCAPTURE_EXPR_8]], align 4 +// CHECK11-NEXT: [[TMP51:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_8]], align 4 +// CHECK11-NEXT: [[SUB10:%.*]] = sub nsw i32 [[TMP51]], 0 +// CHECK11-NEXT: [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1 +// CHECK11-NEXT: [[SUB12:%.*]] = sub nsw i32 [[DIV11]], 1 +// CHECK11-NEXT: store i32 [[SUB12]], i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK11-NEXT: [[TMP52:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK11-NEXT: [[ADD13:%.*]] = add nsw i32 [[TMP52]], 1 +// CHECK11-NEXT: [[TMP53:%.*]] = zext i32 [[ADD13]] to i64 +// CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP53]]) +// CHECK11-NEXT: [[TMP54:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l104.region_id, i32 3, i8** [[TMP48]], i8** [[TMP49]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], 
[3 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK11-NEXT: [[TMP55:%.*]] = icmp ne i32 [[TMP54]], 0 +// CHECK11-NEXT: br i1 [[TMP55]], label [[OMP_OFFLOAD_FAILED14:%.*]], label [[OMP_OFFLOAD_CONT15:%.*]] +// CHECK11: omp_offload.failed14: +// CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l104(i32 [[TMP30]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] +// CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT15]] +// CHECK11: omp_offload.cont15: +// CHECK11-NEXT: [[TMP56:%.*]] = load i32, i32* [[M]], align 4 +// CHECK11-NEXT: store i32 [[TMP56]], i32* [[DOTCAPTURE_EXPR_16]], align 4 +// CHECK11-NEXT: [[TMP57:%.*]] = load i32, i32* [[N]], align 4 +// CHECK11-NEXT: store i32 [[TMP57]], i32* [[N_CASTED17]], align 4 +// CHECK11-NEXT: [[TMP58:%.*]] = load i32, i32* [[N_CASTED17]], align 4 +// CHECK11-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_16]], align 4 +// CHECK11-NEXT: store i32 [[TMP59]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 +// CHECK11-NEXT: [[TMP60:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 +// CHECK11-NEXT: [[TMP61:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK11-NEXT: [[TMP62:%.*]] = sext i32 [[TMP61]] to i64 +// CHECK11-NEXT: [[TMP63:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP64:%.*]] = bitcast i8** [[TMP63]] to i32* +// CHECK11-NEXT: store i32 [[TMP58]], i32* [[TMP64]], align 4 +// CHECK11-NEXT: [[TMP65:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP66:%.*]] = bitcast i8** [[TMP65]] to i32* +// CHECK11-NEXT: store i32 [[TMP58]], i32* [[TMP66]], align 4 +// CHECK11-NEXT: [[TMP67:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 0 +// CHECK11-NEXT: store i8* null, i8** [[TMP67]], align 4 +// CHECK11-NEXT: [[TMP68:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 1 +// CHECK11-NEXT: [[TMP69:%.*]] = bitcast i8** [[TMP68]] to i32* +// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP69]], align 4 +// CHECK11-NEXT: [[TMP70:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 1 +// CHECK11-NEXT: [[TMP71:%.*]] = bitcast i8** [[TMP70]] to i32* +// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP71]], align 4 +// CHECK11-NEXT: [[TMP72:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 1 +// CHECK11-NEXT: store i8* null, i8** [[TMP72]], align 4 +// CHECK11-NEXT: [[TMP73:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 2 +// CHECK11-NEXT: [[TMP74:%.*]] = bitcast i8** [[TMP73]] to i32** +// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP74]], align 4 +// CHECK11-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 2 +// CHECK11-NEXT: [[TMP76:%.*]] = bitcast i8** [[TMP75]] to i32** +// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP76]], align 4 +// CHECK11-NEXT: store i64 [[TMP62]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 2), align 4 +// CHECK11-NEXT: [[TMP77:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 2 +// CHECK11-NEXT: store i8* null, i8** [[TMP77]], align 4 +// CHECK11-NEXT: [[TMP78:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 3 +// CHECK11-NEXT: [[TMP79:%.*]] = bitcast i8** [[TMP78]] to i32* +// CHECK11-NEXT: store i32 
[[TMP60]], i32* [[TMP79]], align 4 +// CHECK11-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 3 +// CHECK11-NEXT: [[TMP81:%.*]] = bitcast i8** [[TMP80]] to i32* +// CHECK11-NEXT: store i32 [[TMP60]], i32* [[TMP81]], align 4 +// CHECK11-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 3 // CHECK11-NEXT: store i8* null, i8** [[TMP82]], align 4 -// CHECK11-NEXT: [[TMP83:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 2 -// CHECK11-NEXT: [[TMP84:%.*]] = bitcast i8** [[TMP83]] to i32** -// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP84]], align 4 -// CHECK11-NEXT: [[TMP85:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 2 -// CHECK11-NEXT: [[TMP86:%.*]] = bitcast i8** [[TMP85]] to i32** -// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP86]], align 4 -// CHECK11-NEXT: [[TMP87:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 2 -// CHECK11-NEXT: store i64 [[TMP70]], i64* [[TMP87]], align 4 -// CHECK11-NEXT: [[TMP88:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 2 -// CHECK11-NEXT: store i8* null, i8** [[TMP88]], align 4 -// CHECK11-NEXT: [[TMP89:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 3 -// CHECK11-NEXT: [[TMP90:%.*]] = bitcast i8** [[TMP89]] to i32* -// CHECK11-NEXT: store i32 [[TMP68]], i32* [[TMP90]], align 4 -// CHECK11-NEXT: [[TMP91:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 3 -// CHECK11-NEXT: [[TMP92:%.*]] = bitcast i8** [[TMP91]] to i32* -// CHECK11-NEXT: store i32 [[TMP68]], i32* [[TMP92]], align 4 -// CHECK11-NEXT: [[TMP93:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 3 -// CHECK11-NEXT: store i64 4, i64* [[TMP93]], align 4 -// CHECK11-NEXT: [[TMP94:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 3 -// CHECK11-NEXT: store i8* null, i8** [[TMP94]], align 4 -// CHECK11-NEXT: [[TMP95:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP96:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP97:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP98:%.*]] = load i32, i32* [[N]], align 4 -// CHECK11-NEXT: store i32 [[TMP98]], i32* [[DOTCAPTURE_EXPR_24]], align 4 -// CHECK11-NEXT: [[TMP99:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_24]], align 4 -// CHECK11-NEXT: [[SUB26:%.*]] = sub nsw i32 [[TMP99]], 0 -// CHECK11-NEXT: [[DIV27:%.*]] = sdiv i32 [[SUB26]], 1 -// CHECK11-NEXT: [[SUB28:%.*]] = sub nsw i32 [[DIV27]], 1 -// CHECK11-NEXT: store i32 [[SUB28]], i32* [[DOTCAPTURE_EXPR_25]], align 4 -// CHECK11-NEXT: [[TMP100:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_25]], align 4 -// CHECK11-NEXT: [[ADD29:%.*]] = add nsw i32 [[TMP100]], 1 -// CHECK11-NEXT: [[TMP101:%.*]] = zext i32 [[ADD29]] to i64 -// CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP101]]) -// CHECK11-NEXT: [[TMP102:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l108.region_id, i32 4, i8** [[TMP95]], i8** [[TMP96]], i64* [[TMP97]], i64* getelementptr inbounds ([4 x i64], [4 x 
i64]* @.offload_maptypes.7, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK11-NEXT: [[TMP103:%.*]] = icmp ne i32 [[TMP102]], 0 -// CHECK11-NEXT: br i1 [[TMP103]], label [[OMP_OFFLOAD_FAILED30:%.*]], label [[OMP_OFFLOAD_CONT31:%.*]] -// CHECK11: omp_offload.failed30: -// CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l108(i32 [[TMP66]], i32 [[TMP0]], i32* [[VLA]], i32 [[TMP68]]) #[[ATTR3]] -// CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT31]] -// CHECK11: omp_offload.cont31: -// CHECK11-NEXT: [[TMP104:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 -// CHECK11-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP104]]) +// CHECK11-NEXT: [[TMP83:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP84:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP85:%.*]] = load i32, i32* [[N]], align 4 +// CHECK11-NEXT: store i32 [[TMP85]], i32* [[DOTCAPTURE_EXPR_22]], align 4 +// CHECK11-NEXT: [[TMP86:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_22]], align 4 +// CHECK11-NEXT: [[SUB24:%.*]] = sub nsw i32 [[TMP86]], 0 +// CHECK11-NEXT: [[DIV25:%.*]] = sdiv i32 [[SUB24]], 1 +// CHECK11-NEXT: [[SUB26:%.*]] = sub nsw i32 [[DIV25]], 1 +// CHECK11-NEXT: store i32 [[SUB26]], i32* [[DOTCAPTURE_EXPR_23]], align 4 +// CHECK11-NEXT: [[TMP87:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_23]], align 4 +// CHECK11-NEXT: [[ADD27:%.*]] = add nsw i32 [[TMP87]], 1 +// CHECK11-NEXT: [[TMP88:%.*]] = zext i32 [[ADD27]] to i64 +// CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP88]]) +// CHECK11-NEXT: [[TMP89:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l108.region_id, i32 4, i8** [[TMP83]], i8** [[TMP84]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK11-NEXT: [[TMP90:%.*]] = icmp ne i32 [[TMP89]], 0 +// CHECK11-NEXT: br i1 [[TMP90]], label [[OMP_OFFLOAD_FAILED28:%.*]], label [[OMP_OFFLOAD_CONT29:%.*]] +// CHECK11: omp_offload.failed28: +// CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l108(i32 [[TMP58]], i32 [[TMP0]], i32* [[VLA]], i32 [[TMP60]]) #[[ATTR3]] +// CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT29]] +// CHECK11: omp_offload.cont29: +// CHECK11-NEXT: [[TMP91:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 +// CHECK11-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP91]]) // CHECK11-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK11-NEXT: [[TMP105:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK11-NEXT: call void @llvm.stackrestore(i8* [[TMP105]]) -// CHECK11-NEXT: [[TMP106:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK11-NEXT: ret i32 [[TMP106]] +// CHECK11-NEXT: [[TMP92:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK11-NEXT: call void @llvm.stackrestore(i8* [[TMP92]]) +// CHECK11-NEXT: [[TMP93:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK11-NEXT: ret i32 [[TMP93]] // // // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l100 @@ -6143,11 +6074,11 @@ // CHECK11-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK11-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // 
CHECK11-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP5]]) +// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP5]]) // CHECK11-NEXT: ret void // // -// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..5 +// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..6 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK11-NEXT: entry: // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -6228,7 +6159,7 @@ // CHECK11-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK11-NEXT: store i32 [[TMP20]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK11-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*, i32)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32 [[TMP16]], i32 [[TMP17]], i32 [[TMP19]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP21]]) +// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*, i32)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i32 [[TMP16]], i32 [[TMP17]], i32 [[TMP19]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP21]]) // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK11: omp.inner.for.inc: // CHECK11-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -6270,7 +6201,7 @@ // CHECK11-NEXT: ret void // // -// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..6 +// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..7 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32 noundef [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK11-NEXT: entry: // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -6406,7 +6337,7 @@ // CHECK11-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK11-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK11-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l81.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK11-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l81.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK11-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK11-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK11: omp_offload.failed: @@ -6424,7 +6355,7 @@ // CHECK11-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0 // CHECK11-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0 // CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK11-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l85.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.13, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.14, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK11-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l85.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.16, i32 0, i32 0), i64* 
getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK11-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 // CHECK11-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]] // CHECK11: omp_offload.failed5: @@ -6455,7 +6386,7 @@ // CHECK11-NEXT: [[TMP31:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0 // CHECK11-NEXT: [[TMP32:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0 // CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK11-NEXT: [[TMP33:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l89.region_id, i32 2, i8** [[TMP31]], i8** [[TMP32]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.17, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.18, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK11-NEXT: [[TMP33:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l89.region_id, i32 2, i8** [[TMP31]], i8** [[TMP32]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.20, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.21, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK11-NEXT: [[TMP34:%.*]] = icmp ne i32 [[TMP33]], 0 // CHECK11-NEXT: br i1 [[TMP34]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]] // CHECK11: omp_offload.failed11: @@ -6471,11 +6402,11 @@ // CHECK11-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK11-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 // CHECK11-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 -// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..8 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK11-NEXT: ret void // // -// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..8 +// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..10 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK11-NEXT: entry: // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -6521,7 +6452,7 @@ // CHECK11: omp.inner.for.body: // CHECK11-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 // CHECK11-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 -// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]) +// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]) // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK11: omp.inner.for.inc: // CHECK11-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -6536,7 +6467,7 @@ // CHECK11-NEXT: ret void // // -// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..9 +// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..11 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK11-NEXT: entry: // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -6616,11 +6547,11 @@ // CHECK11-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK11-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 // CHECK11-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 -// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK11-NEXT: ret void // // -// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..14 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK11-NEXT: entry: // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -6666,7 +6597,7 @@ // CHECK11: omp.inner.for.body: // CHECK11-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 // CHECK11-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 -// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..12 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]) +// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]) // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK11: omp.inner.for.inc: // CHECK11-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -6681,7 +6612,7 @@ // CHECK11-NEXT: ret void // // -// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..12 +// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..15 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK11-NEXT: entry: // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -6767,11 +6698,11 @@ // CHECK11-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK11-NEXT: store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK11-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..15 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP2]]) +// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..18 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP2]]) // CHECK11-NEXT: ret void // // -// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..15 +// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..18 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK11-NEXT: entry: // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -6823,7 +6754,7 @@ // CHECK11-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK11-NEXT: store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK11-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]]) +// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..19 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]]) // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK11: omp.inner.for.inc: // CHECK11-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -6859,7 +6790,7 @@ // CHECK11-NEXT: ret void // // -// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..16 +// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..19 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK11-NEXT: entry: // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -6956,7 +6887,6 @@ // CHECK12-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK12-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK12-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4 -// CHECK12-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 4 // CHECK12-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK12-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK12-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -6964,20 +6894,18 @@ // CHECK12-NEXT: [[DOTOFFLOAD_BASEPTRS4:%.*]] = alloca [3 x i8*], align 4 // CHECK12-NEXT: [[DOTOFFLOAD_PTRS5:%.*]] = alloca [3 x i8*], align 4 // CHECK12-NEXT: [[DOTOFFLOAD_MAPPERS6:%.*]] = alloca [3 x i8*], align 4 -// CHECK12-NEXT: [[DOTOFFLOAD_SIZES7:%.*]] = alloca [3 x i64], align 4 -// CHECK12-NEXT: [[_TMP8:%.*]] = alloca i32, align 4 +// CHECK12-NEXT: [[_TMP7:%.*]] = alloca i32, align 4 +// CHECK12-NEXT: [[DOTCAPTURE_EXPR_8:%.*]] = alloca i32, align 4 // CHECK12-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4 -// CHECK12-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4 -// CHECK12-NEXT: [[DOTCAPTURE_EXPR_17:%.*]] = alloca i32, align 4 -// CHECK12-NEXT: [[N_CASTED18:%.*]] = alloca i32, align 4 +// CHECK12-NEXT: [[DOTCAPTURE_EXPR_16:%.*]] = alloca i32, align 4 +// CHECK12-NEXT: [[N_CASTED17:%.*]] = alloca i32, align 4 // CHECK12-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4 -// CHECK12-NEXT: [[DOTOFFLOAD_BASEPTRS19:%.*]] = alloca [4 x i8*], align 4 -// CHECK12-NEXT: [[DOTOFFLOAD_PTRS20:%.*]] = alloca [4 x i8*], align 4 -// CHECK12-NEXT: [[DOTOFFLOAD_MAPPERS21:%.*]] = alloca [4 x i8*], align 4 -// CHECK12-NEXT: [[DOTOFFLOAD_SIZES22:%.*]] = alloca [4 x i64], align 4 -// CHECK12-NEXT: [[_TMP23:%.*]] = alloca i32, align 4 -// CHECK12-NEXT: [[DOTCAPTURE_EXPR_24:%.*]] = alloca i32, align 4 -// CHECK12-NEXT: [[DOTCAPTURE_EXPR_25:%.*]] = alloca i32, align 4 +// CHECK12-NEXT: [[DOTOFFLOAD_BASEPTRS18:%.*]] = alloca [4 x i8*], align 4 +// CHECK12-NEXT: [[DOTOFFLOAD_PTRS19:%.*]] = alloca [4 x i8*], align 4 +// CHECK12-NEXT: [[DOTOFFLOAD_MAPPERS20:%.*]] = alloca [4 x i8*], align 4 +// CHECK12-NEXT: [[_TMP21:%.*]] = alloca i32, align 4 +// CHECK12-NEXT: [[DOTCAPTURE_EXPR_22:%.*]] = alloca i32, align 4 +// CHECK12-NEXT: [[DOTCAPTURE_EXPR_23:%.*]] = alloca i32, align 4 // CHECK12-NEXT: store i32 0, i32* [[RETVAL]], align 4 // CHECK12-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 // CHECK12-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 4 @@ -6999,185 +6927,165 @@ // CHECK12-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x 
i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK12-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i32* // CHECK12-NEXT: store i32 [[TMP3]], i32* [[TMP9]], align 4 -// CHECK12-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK12-NEXT: store i64 4, i64* [[TMP10]], align 4 -// CHECK12-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK12-NEXT: store i8* null, i8** [[TMP11]], align 4 -// CHECK12-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK12-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32* -// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP13]], align 4 -// CHECK12-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK12-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32* -// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP15]], align 4 -// CHECK12-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK12-NEXT: store i64 4, i64* [[TMP16]], align 4 -// CHECK12-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK12-NEXT: store i8* null, i8** [[TMP17]], align 4 -// CHECK12-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK12-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK12-NEXT: store i8* null, i8** [[TMP10]], align 4 +// CHECK12-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK12-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i32* +// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP12]], align 4 +// CHECK12-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK12-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i32* +// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP14]], align 4 +// CHECK12-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK12-NEXT: store i8* null, i8** [[TMP15]], align 4 +// CHECK12-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK12-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** +// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 4 +// CHECK12-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK12-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** // CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 4 -// CHECK12-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK12-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** -// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 4 -// CHECK12-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK12-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 4 -// CHECK12-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK12-NEXT: store i8* null, i8** [[TMP23]], align 4 -// CHECK12-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], 
[3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4 -// CHECK12-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK12-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK12-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 +// CHECK12-NEXT: store i64 [[TMP5]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 2), align 4 +// CHECK12-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK12-NEXT: store i8* null, i8** [[TMP20]], align 4 +// CHECK12-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP23:%.*]] = load i32, i32* [[N]], align 4 +// CHECK12-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK12-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK12-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK12-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK12-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK12-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK12-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK12-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1 -// CHECK12-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 -// CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP30]]) -// CHECK12-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l100.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK12-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 -// CHECK12-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK12-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK12-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], 1 +// CHECK12-NEXT: [[TMP26:%.*]] = zext i32 [[ADD]] to i64 +// CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP26]]) +// CHECK12-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l100.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK12-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK12-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK12: omp_offload.failed: // CHECK12-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l100(i32 [[TMP3]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK12-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK12: omp_offload.cont: -// CHECK12-NEXT: [[TMP33:%.*]] = load i32, i32* [[N]], align 4 -// CHECK12-NEXT: store i32 [[TMP33]], i32* [[N_CASTED3]], align 4 -// CHECK12-NEXT: 
[[TMP34:%.*]] = load i32, i32* [[N_CASTED3]], align 4 -// CHECK12-NEXT: [[TMP35:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK12-NEXT: [[TMP36:%.*]] = sext i32 [[TMP35]] to i64 -// CHECK12-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i32* -// CHECK12-NEXT: store i32 [[TMP34]], i32* [[TMP38]], align 4 -// CHECK12-NEXT: [[TMP39:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i32* -// CHECK12-NEXT: store i32 [[TMP34]], i32* [[TMP40]], align 4 -// CHECK12-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 0 -// CHECK12-NEXT: store i64 4, i64* [[TMP41]], align 4 -// CHECK12-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP29:%.*]] = load i32, i32* [[N]], align 4 +// CHECK12-NEXT: store i32 [[TMP29]], i32* [[N_CASTED3]], align 4 +// CHECK12-NEXT: [[TMP30:%.*]] = load i32, i32* [[N_CASTED3]], align 4 +// CHECK12-NEXT: [[TMP31:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK12-NEXT: [[TMP32:%.*]] = sext i32 [[TMP31]] to i64 +// CHECK12-NEXT: [[TMP33:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i32* +// CHECK12-NEXT: store i32 [[TMP30]], i32* [[TMP34]], align 4 +// CHECK12-NEXT: [[TMP35:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP36:%.*]] = bitcast i8** [[TMP35]] to i32* +// CHECK12-NEXT: store i32 [[TMP30]], i32* [[TMP36]], align 4 +// CHECK12-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0 +// CHECK12-NEXT: store i8* null, i8** [[TMP37]], align 4 +// CHECK12-NEXT: [[TMP38:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1 +// CHECK12-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i32* +// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP39]], align 4 +// CHECK12-NEXT: [[TMP40:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 1 +// CHECK12-NEXT: [[TMP41:%.*]] = bitcast i8** [[TMP40]] to i32* +// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP41]], align 4 +// CHECK12-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1 // CHECK12-NEXT: store i8* null, i8** [[TMP42]], align 4 -// CHECK12-NEXT: [[TMP43:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1 -// CHECK12-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32* -// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP44]], align 4 -// CHECK12-NEXT: [[TMP45:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 1 -// CHECK12-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32* -// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP46]], align 4 -// CHECK12-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 1 -// CHECK12-NEXT: store i64 4, i64* [[TMP47]], align 4 -// CHECK12-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1 -// CHECK12-NEXT: store i8* null, i8** [[TMP48]], align 4 -// CHECK12-NEXT: [[TMP49:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2 -// 
CHECK12-NEXT: [[TMP50:%.*]] = bitcast i8** [[TMP49]] to i32** -// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP50]], align 4 -// CHECK12-NEXT: [[TMP51:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 2 -// CHECK12-NEXT: [[TMP52:%.*]] = bitcast i8** [[TMP51]] to i32** -// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP52]], align 4 -// CHECK12-NEXT: [[TMP53:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 2 -// CHECK12-NEXT: store i64 [[TMP36]], i64* [[TMP53]], align 4 -// CHECK12-NEXT: [[TMP54:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 2 -// CHECK12-NEXT: store i8* null, i8** [[TMP54]], align 4 -// CHECK12-NEXT: [[TMP55:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP56:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP57:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP58:%.*]] = load i32, i32* [[N]], align 4 -// CHECK12-NEXT: store i32 [[TMP58]], i32* [[DOTCAPTURE_EXPR_9]], align 4 -// CHECK12-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 -// CHECK12-NEXT: [[SUB11:%.*]] = sub nsw i32 [[TMP59]], 0 -// CHECK12-NEXT: [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1 -// CHECK12-NEXT: [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1 -// CHECK12-NEXT: store i32 [[SUB13]], i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK12-NEXT: [[TMP60:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK12-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP60]], 1 -// CHECK12-NEXT: [[TMP61:%.*]] = zext i32 [[ADD14]] to i64 -// CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP61]]) -// CHECK12-NEXT: [[TMP62:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l104.region_id, i32 3, i8** [[TMP55]], i8** [[TMP56]], i64* [[TMP57]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK12-NEXT: [[TMP63:%.*]] = icmp ne i32 [[TMP62]], 0 -// CHECK12-NEXT: br i1 [[TMP63]], label [[OMP_OFFLOAD_FAILED15:%.*]], label [[OMP_OFFLOAD_CONT16:%.*]] -// CHECK12: omp_offload.failed15: -// CHECK12-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l104(i32 [[TMP34]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] -// CHECK12-NEXT: br label [[OMP_OFFLOAD_CONT16]] -// CHECK12: omp_offload.cont16: -// CHECK12-NEXT: [[TMP64:%.*]] = load i32, i32* [[M]], align 4 -// CHECK12-NEXT: store i32 [[TMP64]], i32* [[DOTCAPTURE_EXPR_17]], align 4 -// CHECK12-NEXT: [[TMP65:%.*]] = load i32, i32* [[N]], align 4 -// CHECK12-NEXT: store i32 [[TMP65]], i32* [[N_CASTED18]], align 4 -// CHECK12-NEXT: [[TMP66:%.*]] = load i32, i32* [[N_CASTED18]], align 4 -// CHECK12-NEXT: [[TMP67:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_17]], align 4 -// CHECK12-NEXT: store i32 [[TMP67]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK12-NEXT: [[TMP68:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK12-NEXT: [[TMP69:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK12-NEXT: [[TMP70:%.*]] = sext i32 [[TMP69]] to i64 -// CHECK12-NEXT: [[TMP71:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP72:%.*]] = bitcast i8** [[TMP71]] to i32* -// CHECK12-NEXT: store i32 
[[TMP66]], i32* [[TMP72]], align 4 -// CHECK12-NEXT: [[TMP73:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP74:%.*]] = bitcast i8** [[TMP73]] to i32* -// CHECK12-NEXT: store i32 [[TMP66]], i32* [[TMP74]], align 4 -// CHECK12-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 0 -// CHECK12-NEXT: store i64 4, i64* [[TMP75]], align 4 -// CHECK12-NEXT: [[TMP76:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 0 -// CHECK12-NEXT: store i8* null, i8** [[TMP76]], align 4 -// CHECK12-NEXT: [[TMP77:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 1 -// CHECK12-NEXT: [[TMP78:%.*]] = bitcast i8** [[TMP77]] to i32* -// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP78]], align 4 -// CHECK12-NEXT: [[TMP79:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 1 -// CHECK12-NEXT: [[TMP80:%.*]] = bitcast i8** [[TMP79]] to i32* -// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP80]], align 4 -// CHECK12-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 1 -// CHECK12-NEXT: store i64 4, i64* [[TMP81]], align 4 -// CHECK12-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 1 +// CHECK12-NEXT: [[TMP43:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2 +// CHECK12-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32** +// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP44]], align 4 +// CHECK12-NEXT: [[TMP45:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 2 +// CHECK12-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32** +// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP46]], align 4 +// CHECK12-NEXT: store i64 [[TMP32]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 2), align 4 +// CHECK12-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 2 +// CHECK12-NEXT: store i8* null, i8** [[TMP47]], align 4 +// CHECK12-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP49:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP50:%.*]] = load i32, i32* [[N]], align 4 +// CHECK12-NEXT: store i32 [[TMP50]], i32* [[DOTCAPTURE_EXPR_8]], align 4 +// CHECK12-NEXT: [[TMP51:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_8]], align 4 +// CHECK12-NEXT: [[SUB10:%.*]] = sub nsw i32 [[TMP51]], 0 +// CHECK12-NEXT: [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1 +// CHECK12-NEXT: [[SUB12:%.*]] = sub nsw i32 [[DIV11]], 1 +// CHECK12-NEXT: store i32 [[SUB12]], i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK12-NEXT: [[TMP52:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK12-NEXT: [[ADD13:%.*]] = add nsw i32 [[TMP52]], 1 +// CHECK12-NEXT: [[TMP53:%.*]] = zext i32 [[ADD13]] to i64 +// CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP53]]) +// CHECK12-NEXT: [[TMP54:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l104.region_id, i32 3, i8** [[TMP48]], i8** [[TMP49]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], 
[3 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK12-NEXT: [[TMP55:%.*]] = icmp ne i32 [[TMP54]], 0 +// CHECK12-NEXT: br i1 [[TMP55]], label [[OMP_OFFLOAD_FAILED14:%.*]], label [[OMP_OFFLOAD_CONT15:%.*]] +// CHECK12: omp_offload.failed14: +// CHECK12-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l104(i32 [[TMP30]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] +// CHECK12-NEXT: br label [[OMP_OFFLOAD_CONT15]] +// CHECK12: omp_offload.cont15: +// CHECK12-NEXT: [[TMP56:%.*]] = load i32, i32* [[M]], align 4 +// CHECK12-NEXT: store i32 [[TMP56]], i32* [[DOTCAPTURE_EXPR_16]], align 4 +// CHECK12-NEXT: [[TMP57:%.*]] = load i32, i32* [[N]], align 4 +// CHECK12-NEXT: store i32 [[TMP57]], i32* [[N_CASTED17]], align 4 +// CHECK12-NEXT: [[TMP58:%.*]] = load i32, i32* [[N_CASTED17]], align 4 +// CHECK12-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_16]], align 4 +// CHECK12-NEXT: store i32 [[TMP59]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 +// CHECK12-NEXT: [[TMP60:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 +// CHECK12-NEXT: [[TMP61:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK12-NEXT: [[TMP62:%.*]] = sext i32 [[TMP61]] to i64 +// CHECK12-NEXT: [[TMP63:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP64:%.*]] = bitcast i8** [[TMP63]] to i32* +// CHECK12-NEXT: store i32 [[TMP58]], i32* [[TMP64]], align 4 +// CHECK12-NEXT: [[TMP65:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP66:%.*]] = bitcast i8** [[TMP65]] to i32* +// CHECK12-NEXT: store i32 [[TMP58]], i32* [[TMP66]], align 4 +// CHECK12-NEXT: [[TMP67:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 0 +// CHECK12-NEXT: store i8* null, i8** [[TMP67]], align 4 +// CHECK12-NEXT: [[TMP68:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 1 +// CHECK12-NEXT: [[TMP69:%.*]] = bitcast i8** [[TMP68]] to i32* +// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP69]], align 4 +// CHECK12-NEXT: [[TMP70:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 1 +// CHECK12-NEXT: [[TMP71:%.*]] = bitcast i8** [[TMP70]] to i32* +// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP71]], align 4 +// CHECK12-NEXT: [[TMP72:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 1 +// CHECK12-NEXT: store i8* null, i8** [[TMP72]], align 4 +// CHECK12-NEXT: [[TMP73:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 2 +// CHECK12-NEXT: [[TMP74:%.*]] = bitcast i8** [[TMP73]] to i32** +// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP74]], align 4 +// CHECK12-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 2 +// CHECK12-NEXT: [[TMP76:%.*]] = bitcast i8** [[TMP75]] to i32** +// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP76]], align 4 +// CHECK12-NEXT: store i64 [[TMP62]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 2), align 4 +// CHECK12-NEXT: [[TMP77:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 2 +// CHECK12-NEXT: store i8* null, i8** [[TMP77]], align 4 +// CHECK12-NEXT: [[TMP78:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 3 +// CHECK12-NEXT: [[TMP79:%.*]] = bitcast i8** [[TMP78]] to i32* +// CHECK12-NEXT: store i32 
[[TMP60]], i32* [[TMP79]], align 4 +// CHECK12-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 3 +// CHECK12-NEXT: [[TMP81:%.*]] = bitcast i8** [[TMP80]] to i32* +// CHECK12-NEXT: store i32 [[TMP60]], i32* [[TMP81]], align 4 +// CHECK12-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 3 // CHECK12-NEXT: store i8* null, i8** [[TMP82]], align 4 -// CHECK12-NEXT: [[TMP83:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 2 -// CHECK12-NEXT: [[TMP84:%.*]] = bitcast i8** [[TMP83]] to i32** -// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP84]], align 4 -// CHECK12-NEXT: [[TMP85:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 2 -// CHECK12-NEXT: [[TMP86:%.*]] = bitcast i8** [[TMP85]] to i32** -// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP86]], align 4 -// CHECK12-NEXT: [[TMP87:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 2 -// CHECK12-NEXT: store i64 [[TMP70]], i64* [[TMP87]], align 4 -// CHECK12-NEXT: [[TMP88:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 2 -// CHECK12-NEXT: store i8* null, i8** [[TMP88]], align 4 -// CHECK12-NEXT: [[TMP89:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 3 -// CHECK12-NEXT: [[TMP90:%.*]] = bitcast i8** [[TMP89]] to i32* -// CHECK12-NEXT: store i32 [[TMP68]], i32* [[TMP90]], align 4 -// CHECK12-NEXT: [[TMP91:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 3 -// CHECK12-NEXT: [[TMP92:%.*]] = bitcast i8** [[TMP91]] to i32* -// CHECK12-NEXT: store i32 [[TMP68]], i32* [[TMP92]], align 4 -// CHECK12-NEXT: [[TMP93:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 3 -// CHECK12-NEXT: store i64 4, i64* [[TMP93]], align 4 -// CHECK12-NEXT: [[TMP94:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 3 -// CHECK12-NEXT: store i8* null, i8** [[TMP94]], align 4 -// CHECK12-NEXT: [[TMP95:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP96:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP97:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP98:%.*]] = load i32, i32* [[N]], align 4 -// CHECK12-NEXT: store i32 [[TMP98]], i32* [[DOTCAPTURE_EXPR_24]], align 4 -// CHECK12-NEXT: [[TMP99:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_24]], align 4 -// CHECK12-NEXT: [[SUB26:%.*]] = sub nsw i32 [[TMP99]], 0 -// CHECK12-NEXT: [[DIV27:%.*]] = sdiv i32 [[SUB26]], 1 -// CHECK12-NEXT: [[SUB28:%.*]] = sub nsw i32 [[DIV27]], 1 -// CHECK12-NEXT: store i32 [[SUB28]], i32* [[DOTCAPTURE_EXPR_25]], align 4 -// CHECK12-NEXT: [[TMP100:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_25]], align 4 -// CHECK12-NEXT: [[ADD29:%.*]] = add nsw i32 [[TMP100]], 1 -// CHECK12-NEXT: [[TMP101:%.*]] = zext i32 [[ADD29]] to i64 -// CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP101]]) -// CHECK12-NEXT: [[TMP102:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l108.region_id, i32 4, i8** [[TMP95]], i8** [[TMP96]], i64* [[TMP97]], i64* getelementptr inbounds ([4 x i64], [4 x 
i64]* @.offload_maptypes.7, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK12-NEXT: [[TMP103:%.*]] = icmp ne i32 [[TMP102]], 0 -// CHECK12-NEXT: br i1 [[TMP103]], label [[OMP_OFFLOAD_FAILED30:%.*]], label [[OMP_OFFLOAD_CONT31:%.*]] -// CHECK12: omp_offload.failed30: -// CHECK12-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l108(i32 [[TMP66]], i32 [[TMP0]], i32* [[VLA]], i32 [[TMP68]]) #[[ATTR3]] -// CHECK12-NEXT: br label [[OMP_OFFLOAD_CONT31]] -// CHECK12: omp_offload.cont31: -// CHECK12-NEXT: [[TMP104:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 -// CHECK12-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP104]]) +// CHECK12-NEXT: [[TMP83:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP84:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP85:%.*]] = load i32, i32* [[N]], align 4 +// CHECK12-NEXT: store i32 [[TMP85]], i32* [[DOTCAPTURE_EXPR_22]], align 4 +// CHECK12-NEXT: [[TMP86:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_22]], align 4 +// CHECK12-NEXT: [[SUB24:%.*]] = sub nsw i32 [[TMP86]], 0 +// CHECK12-NEXT: [[DIV25:%.*]] = sdiv i32 [[SUB24]], 1 +// CHECK12-NEXT: [[SUB26:%.*]] = sub nsw i32 [[DIV25]], 1 +// CHECK12-NEXT: store i32 [[SUB26]], i32* [[DOTCAPTURE_EXPR_23]], align 4 +// CHECK12-NEXT: [[TMP87:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_23]], align 4 +// CHECK12-NEXT: [[ADD27:%.*]] = add nsw i32 [[TMP87]], 1 +// CHECK12-NEXT: [[TMP88:%.*]] = zext i32 [[ADD27]] to i64 +// CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP88]]) +// CHECK12-NEXT: [[TMP89:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l108.region_id, i32 4, i8** [[TMP83]], i8** [[TMP84]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK12-NEXT: [[TMP90:%.*]] = icmp ne i32 [[TMP89]], 0 +// CHECK12-NEXT: br i1 [[TMP90]], label [[OMP_OFFLOAD_FAILED28:%.*]], label [[OMP_OFFLOAD_CONT29:%.*]] +// CHECK12: omp_offload.failed28: +// CHECK12-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l108(i32 [[TMP58]], i32 [[TMP0]], i32* [[VLA]], i32 [[TMP60]]) #[[ATTR3]] +// CHECK12-NEXT: br label [[OMP_OFFLOAD_CONT29]] +// CHECK12: omp_offload.cont29: +// CHECK12-NEXT: [[TMP91:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 +// CHECK12-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP91]]) // CHECK12-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK12-NEXT: [[TMP105:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK12-NEXT: call void @llvm.stackrestore(i8* [[TMP105]]) -// CHECK12-NEXT: [[TMP106:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK12-NEXT: ret i32 [[TMP106]] +// CHECK12-NEXT: [[TMP92:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK12-NEXT: call void @llvm.stackrestore(i8* [[TMP92]]) +// CHECK12-NEXT: [[TMP93:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK12-NEXT: ret i32 [[TMP93]] // // // CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l100 @@ -7625,11 +7533,11 @@ // CHECK12-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK12-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // 
CHECK12-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP5]]) +// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP5]]) // CHECK12-NEXT: ret void // // -// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..5 +// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..6 // CHECK12-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK12-NEXT: entry: // CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -7710,7 +7618,7 @@ // CHECK12-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK12-NEXT: store i32 [[TMP20]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK12-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*, i32)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32 [[TMP16]], i32 [[TMP17]], i32 [[TMP19]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP21]]) +// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*, i32)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i32 [[TMP16]], i32 [[TMP17]], i32 [[TMP19]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP21]]) // CHECK12-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK12: omp.inner.for.inc: // CHECK12-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -7752,7 +7660,7 @@ // CHECK12-NEXT: ret void // // -// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..6 +// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..7 // CHECK12-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32 noundef [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK12-NEXT: entry: // CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -7888,7 +7796,7 @@ // CHECK12-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK12-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK12-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l81.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK12-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l81.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK12-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK12-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK12: omp_offload.failed: @@ -7906,7 +7814,7 @@ // CHECK12-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0 // CHECK12-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0 // CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK12-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l85.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.13, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.14, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK12-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l85.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.16, i32 0, i32 0), i64* 
getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK12-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 // CHECK12-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]] // CHECK12: omp_offload.failed5: @@ -7937,7 +7845,7 @@ // CHECK12-NEXT: [[TMP31:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0 // CHECK12-NEXT: [[TMP32:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0 // CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK12-NEXT: [[TMP33:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l89.region_id, i32 2, i8** [[TMP31]], i8** [[TMP32]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.17, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.18, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK12-NEXT: [[TMP33:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l89.region_id, i32 2, i8** [[TMP31]], i8** [[TMP32]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.20, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.21, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK12-NEXT: [[TMP34:%.*]] = icmp ne i32 [[TMP33]], 0 // CHECK12-NEXT: br i1 [[TMP34]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]] // CHECK12: omp_offload.failed11: @@ -7953,11 +7861,11 @@ // CHECK12-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK12-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 // CHECK12-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 -// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..8 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK12-NEXT: ret void // // -// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..8 +// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..10 // CHECK12-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK12-NEXT: entry: // CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -8003,7 +7911,7 @@ // CHECK12: omp.inner.for.body: // CHECK12-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 // CHECK12-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 -// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]) +// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]) // CHECK12-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK12: omp.inner.for.inc: // CHECK12-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -8018,7 +7926,7 @@ // CHECK12-NEXT: ret void // // -// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..9 +// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..11 // CHECK12-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK12-NEXT: entry: // CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -8098,11 +8006,11 @@ // CHECK12-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK12-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 // CHECK12-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 -// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK12-NEXT: ret void // // -// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..14 // CHECK12-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK12-NEXT: entry: // CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -8148,7 +8056,7 @@ // CHECK12: omp.inner.for.body: // CHECK12-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 // CHECK12-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 -// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..12 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]) +// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]) // CHECK12-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK12: omp.inner.for.inc: // CHECK12-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -8163,7 +8071,7 @@ // CHECK12-NEXT: ret void // // -// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..12 +// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..15 // CHECK12-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK12-NEXT: entry: // CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -8249,11 +8157,11 @@ // CHECK12-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK12-NEXT: store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK12-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..15 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP2]]) +// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..18 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP2]]) // CHECK12-NEXT: ret void // // -// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..15 +// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..18 // CHECK12-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK12-NEXT: entry: // CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -8305,7 +8213,7 @@ // CHECK12-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK12-NEXT: store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK12-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]]) +// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..19 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]]) // CHECK12-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK12: omp.inner.for.inc: // CHECK12-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -8341,7 +8249,7 @@ // CHECK12-NEXT: ret void // // -// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..16 +// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..19 // CHECK12-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK12-NEXT: entry: // CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 diff --git a/clang/test/OpenMP/target_teams_distribute_parallel_for_schedule_codegen.cpp b/clang/test/OpenMP/target_teams_distribute_parallel_for_schedule_codegen.cpp --- a/clang/test/OpenMP/target_teams_distribute_parallel_for_schedule_codegen.cpp +++ b/clang/test/OpenMP/target_teams_distribute_parallel_for_schedule_codegen.cpp @@ -7412,7 +7412,6 @@ // CHECK13-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK13-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK13-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8 -// CHECK13-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 8 // CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -7420,38 +7419,34 @@ // CHECK13-NEXT: [[DOTOFFLOAD_BASEPTRS5:%.*]] = alloca [3 x i8*], align 8 // CHECK13-NEXT: [[DOTOFFLOAD_PTRS6:%.*]] = alloca [3 x i8*], align 8 // CHECK13-NEXT: [[DOTOFFLOAD_MAPPERS7:%.*]] = alloca [3 x i8*], align 8 -// CHECK13-NEXT: [[DOTOFFLOAD_SIZES8:%.*]] = alloca [3 x i64], align 8 -// CHECK13-NEXT: [[_TMP9:%.*]] = alloca i32, align 4 +// CHECK13-NEXT: [[_TMP8:%.*]] = alloca i32, align 4 +// CHECK13-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4 -// CHECK13-NEXT: [[DOTCAPTURE_EXPR_11:%.*]] = alloca i32, align 4 -// CHECK13-NEXT: [[DOTCAPTURE_EXPR_18:%.*]] = alloca i32, align 4 -// CHECK13-NEXT: [[N_CASTED19:%.*]] = alloca i64, align 8 +// CHECK13-NEXT: [[DOTCAPTURE_EXPR_17:%.*]] = alloca i32, align 4 +// CHECK13-NEXT: [[N_CASTED18:%.*]] = alloca i64, align 8 // CHECK13-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8 -// CHECK13-NEXT: [[DOTOFFLOAD_BASEPTRS22:%.*]] = alloca [4 x i8*], align 8 -// CHECK13-NEXT: [[DOTOFFLOAD_PTRS23:%.*]] = alloca [4 x i8*], align 8 -// CHECK13-NEXT: [[DOTOFFLOAD_MAPPERS24:%.*]] = alloca [4 x i8*], align 8 -// CHECK13-NEXT: [[DOTOFFLOAD_SIZES25:%.*]] = alloca [4 x i64], align 8 -// CHECK13-NEXT: [[_TMP26:%.*]] = alloca i32, align 4 -// CHECK13-NEXT: [[DOTCAPTURE_EXPR_27:%.*]] = alloca i32, align 4 -// CHECK13-NEXT: [[DOTCAPTURE_EXPR_28:%.*]] = alloca i32, align 4 -// CHECK13-NEXT: [[N_CASTED35:%.*]] = alloca i64, align 8 -// CHECK13-NEXT: [[DOTOFFLOAD_BASEPTRS37:%.*]] = alloca [3 x i8*], align 8 -// CHECK13-NEXT: [[DOTOFFLOAD_PTRS38:%.*]] = alloca [3 x i8*], align 8 -// CHECK13-NEXT: [[DOTOFFLOAD_MAPPERS39:%.*]] = alloca [3 x i8*], align 8 -// CHECK13-NEXT: [[DOTOFFLOAD_SIZES40:%.*]] = alloca [3 x i64], align 8 
-// CHECK13-NEXT: [[_TMP41:%.*]] = alloca i32, align 4 -// CHECK13-NEXT: [[DOTCAPTURE_EXPR_42:%.*]] = alloca i32, align 4 -// CHECK13-NEXT: [[DOTCAPTURE_EXPR_43:%.*]] = alloca i32, align 4 -// CHECK13-NEXT: [[DOTCAPTURE_EXPR_50:%.*]] = alloca i32, align 4 -// CHECK13-NEXT: [[N_CASTED51:%.*]] = alloca i64, align 8 -// CHECK13-NEXT: [[DOTCAPTURE_EXPR__CASTED53:%.*]] = alloca i64, align 8 -// CHECK13-NEXT: [[DOTOFFLOAD_BASEPTRS55:%.*]] = alloca [4 x i8*], align 8 -// CHECK13-NEXT: [[DOTOFFLOAD_PTRS56:%.*]] = alloca [4 x i8*], align 8 -// CHECK13-NEXT: [[DOTOFFLOAD_MAPPERS57:%.*]] = alloca [4 x i8*], align 8 -// CHECK13-NEXT: [[DOTOFFLOAD_SIZES58:%.*]] = alloca [4 x i64], align 8 -// CHECK13-NEXT: [[_TMP59:%.*]] = alloca i32, align 4 -// CHECK13-NEXT: [[DOTCAPTURE_EXPR_60:%.*]] = alloca i32, align 4 -// CHECK13-NEXT: [[DOTCAPTURE_EXPR_61:%.*]] = alloca i32, align 4 +// CHECK13-NEXT: [[DOTOFFLOAD_BASEPTRS21:%.*]] = alloca [4 x i8*], align 8 +// CHECK13-NEXT: [[DOTOFFLOAD_PTRS22:%.*]] = alloca [4 x i8*], align 8 +// CHECK13-NEXT: [[DOTOFFLOAD_MAPPERS23:%.*]] = alloca [4 x i8*], align 8 +// CHECK13-NEXT: [[_TMP24:%.*]] = alloca i32, align 4 +// CHECK13-NEXT: [[DOTCAPTURE_EXPR_25:%.*]] = alloca i32, align 4 +// CHECK13-NEXT: [[DOTCAPTURE_EXPR_26:%.*]] = alloca i32, align 4 +// CHECK13-NEXT: [[N_CASTED33:%.*]] = alloca i64, align 8 +// CHECK13-NEXT: [[DOTOFFLOAD_BASEPTRS35:%.*]] = alloca [3 x i8*], align 8 +// CHECK13-NEXT: [[DOTOFFLOAD_PTRS36:%.*]] = alloca [3 x i8*], align 8 +// CHECK13-NEXT: [[DOTOFFLOAD_MAPPERS37:%.*]] = alloca [3 x i8*], align 8 +// CHECK13-NEXT: [[_TMP38:%.*]] = alloca i32, align 4 +// CHECK13-NEXT: [[DOTCAPTURE_EXPR_39:%.*]] = alloca i32, align 4 +// CHECK13-NEXT: [[DOTCAPTURE_EXPR_40:%.*]] = alloca i32, align 4 +// CHECK13-NEXT: [[DOTCAPTURE_EXPR_47:%.*]] = alloca i32, align 4 +// CHECK13-NEXT: [[N_CASTED48:%.*]] = alloca i64, align 8 +// CHECK13-NEXT: [[DOTCAPTURE_EXPR__CASTED50:%.*]] = alloca i64, align 8 +// CHECK13-NEXT: [[DOTOFFLOAD_BASEPTRS52:%.*]] = alloca [4 x i8*], align 8 +// CHECK13-NEXT: [[DOTOFFLOAD_PTRS53:%.*]] = alloca [4 x i8*], align 8 +// CHECK13-NEXT: [[DOTOFFLOAD_MAPPERS54:%.*]] = alloca [4 x i8*], align 8 +// CHECK13-NEXT: [[_TMP55:%.*]] = alloca i32, align 4 +// CHECK13-NEXT: [[DOTCAPTURE_EXPR_56:%.*]] = alloca i32, align 4 +// CHECK13-NEXT: [[DOTCAPTURE_EXPR_57:%.*]] = alloca i32, align 4 // CHECK13-NEXT: store i32 0, i32* [[RETVAL]], align 4 // CHECK13-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 // CHECK13-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 8 @@ -7474,314 +7469,280 @@ // CHECK13-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK13-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i64* // CHECK13-NEXT: store i64 [[TMP4]], i64* [[TMP9]], align 8 -// CHECK13-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK13-NEXT: store i64 4, i64* [[TMP10]], align 8 -// CHECK13-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK13-NEXT: store i8* null, i8** [[TMP11]], align 8 -// CHECK13-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK13-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64* -// CHECK13-NEXT: store i64 [[TMP1]], i64* [[TMP13]], align 8 -// CHECK13-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK13-NEXT: [[TMP15:%.*]] = 
bitcast i8** [[TMP14]] to i64* -// CHECK13-NEXT: store i64 [[TMP1]], i64* [[TMP15]], align 8 -// CHECK13-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK13-NEXT: store i64 8, i64* [[TMP16]], align 8 -// CHECK13-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK13-NEXT: store i8* null, i8** [[TMP17]], align 8 -// CHECK13-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK13-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK13-NEXT: store i8* null, i8** [[TMP10]], align 8 +// CHECK13-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK13-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i64* +// CHECK13-NEXT: store i64 [[TMP1]], i64* [[TMP12]], align 8 +// CHECK13-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK13-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i64* +// CHECK13-NEXT: store i64 [[TMP1]], i64* [[TMP14]], align 8 +// CHECK13-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK13-NEXT: store i8* null, i8** [[TMP15]], align 8 +// CHECK13-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK13-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** +// CHECK13-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 8 +// CHECK13-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK13-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** // CHECK13-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 8 -// CHECK13-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK13-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** -// CHECK13-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 8 -// CHECK13-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK13-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 8 -// CHECK13-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK13-NEXT: store i8* null, i8** [[TMP23]], align 8 -// CHECK13-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4 -// CHECK13-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK13-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK13-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 +// CHECK13-NEXT: store i64 [[TMP5]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 2), align 8 +// CHECK13-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK13-NEXT: store i8* null, i8** [[TMP20]], align 8 +// CHECK13-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK13-NEXT: [[TMP22:%.*]] = getelementptr 
inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK13-NEXT: [[TMP23:%.*]] = load i32, i32* [[N]], align 4 +// CHECK13-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK13-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK13-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK13-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK13-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK13-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK13-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1 -// CHECK13-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 -// CHECK13-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP30]]) -// CHECK13-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK13-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 -// CHECK13-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK13-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], 1 +// CHECK13-NEXT: [[TMP26:%.*]] = zext i32 [[ADD]] to i64 +// CHECK13-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP26]]) +// CHECK13-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK13-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK13-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK13: omp_offload.failed: // CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139(i64 [[TMP4]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK13-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK13: omp_offload.cont: -// CHECK13-NEXT: [[TMP33:%.*]] = load i32, i32* [[N]], align 4 +// CHECK13-NEXT: [[TMP29:%.*]] = load i32, i32* [[N]], align 4 // CHECK13-NEXT: [[CONV4:%.*]] = bitcast i64* [[N_CASTED3]] to i32* -// CHECK13-NEXT: store i32 [[TMP33]], i32* [[CONV4]], align 4 -// CHECK13-NEXT: [[TMP34:%.*]] = load i64, i64* [[N_CASTED3]], align 8 -// CHECK13-NEXT: [[TMP35:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK13-NEXT: [[TMP36:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i64* -// CHECK13-NEXT: store i64 [[TMP34]], i64* [[TMP37]], align 8 -// CHECK13-NEXT: [[TMP38:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i64* -// CHECK13-NEXT: store i64 [[TMP34]], i64* [[TMP39]], align 8 -// CHECK13-NEXT: [[TMP40:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 0 -// CHECK13-NEXT: store i64 4, i64* [[TMP40]], align 8 -// 
CHECK13-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 0 +// CHECK13-NEXT: store i32 [[TMP29]], i32* [[CONV4]], align 4 +// CHECK13-NEXT: [[TMP30:%.*]] = load i64, i64* [[N_CASTED3]], align 8 +// CHECK13-NEXT: [[TMP31:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK13-NEXT: [[TMP32:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 +// CHECK13-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i64* +// CHECK13-NEXT: store i64 [[TMP30]], i64* [[TMP33]], align 8 +// CHECK13-NEXT: [[TMP34:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 +// CHECK13-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i64* +// CHECK13-NEXT: store i64 [[TMP30]], i64* [[TMP35]], align 8 +// CHECK13-NEXT: [[TMP36:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 0 +// CHECK13-NEXT: store i8* null, i8** [[TMP36]], align 8 +// CHECK13-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1 +// CHECK13-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i64* +// CHECK13-NEXT: store i64 [[TMP1]], i64* [[TMP38]], align 8 +// CHECK13-NEXT: [[TMP39:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 1 +// CHECK13-NEXT: [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i64* +// CHECK13-NEXT: store i64 [[TMP1]], i64* [[TMP40]], align 8 +// CHECK13-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 1 // CHECK13-NEXT: store i8* null, i8** [[TMP41]], align 8 -// CHECK13-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1 -// CHECK13-NEXT: [[TMP43:%.*]] = bitcast i8** [[TMP42]] to i64* -// CHECK13-NEXT: store i64 [[TMP1]], i64* [[TMP43]], align 8 -// CHECK13-NEXT: [[TMP44:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 1 -// CHECK13-NEXT: [[TMP45:%.*]] = bitcast i8** [[TMP44]] to i64* -// CHECK13-NEXT: store i64 [[TMP1]], i64* [[TMP45]], align 8 -// CHECK13-NEXT: [[TMP46:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 1 -// CHECK13-NEXT: store i64 8, i64* [[TMP46]], align 8 -// CHECK13-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 1 -// CHECK13-NEXT: store i8* null, i8** [[TMP47]], align 8 -// CHECK13-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 2 -// CHECK13-NEXT: [[TMP49:%.*]] = bitcast i8** [[TMP48]] to i32** -// CHECK13-NEXT: store i32* [[VLA]], i32** [[TMP49]], align 8 -// CHECK13-NEXT: [[TMP50:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 2 -// CHECK13-NEXT: [[TMP51:%.*]] = bitcast i8** [[TMP50]] to i32** -// CHECK13-NEXT: store i32* [[VLA]], i32** [[TMP51]], align 8 -// CHECK13-NEXT: [[TMP52:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 2 -// CHECK13-NEXT: store i64 [[TMP35]], i64* [[TMP52]], align 8 -// CHECK13-NEXT: [[TMP53:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 2 -// CHECK13-NEXT: store i8* null, i8** [[TMP53]], align 8 -// CHECK13-NEXT: [[TMP54:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP55:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 -// CHECK13-NEXT: 
[[TMP56:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP57:%.*]] = load i32, i32* [[N]], align 4 -// CHECK13-NEXT: store i32 [[TMP57]], i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK13-NEXT: [[TMP58:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK13-NEXT: [[SUB12:%.*]] = sub nsw i32 [[TMP58]], 0 -// CHECK13-NEXT: [[DIV13:%.*]] = sdiv i32 [[SUB12]], 1 -// CHECK13-NEXT: [[SUB14:%.*]] = sub nsw i32 [[DIV13]], 1 -// CHECK13-NEXT: store i32 [[SUB14]], i32* [[DOTCAPTURE_EXPR_11]], align 4 -// CHECK13-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_11]], align 4 -// CHECK13-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP59]], 1 -// CHECK13-NEXT: [[TMP60:%.*]] = zext i32 [[ADD15]] to i64 -// CHECK13-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP60]]) -// CHECK13-NEXT: [[TMP61:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143.region_id, i32 3, i8** [[TMP54]], i8** [[TMP55]], i64* [[TMP56]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK13-NEXT: [[TMP62:%.*]] = icmp ne i32 [[TMP61]], 0 -// CHECK13-NEXT: br i1 [[TMP62]], label [[OMP_OFFLOAD_FAILED16:%.*]], label [[OMP_OFFLOAD_CONT17:%.*]] -// CHECK13: omp_offload.failed16: -// CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143(i64 [[TMP34]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] -// CHECK13-NEXT: br label [[OMP_OFFLOAD_CONT17]] -// CHECK13: omp_offload.cont17: -// CHECK13-NEXT: [[TMP63:%.*]] = load i32, i32* [[M]], align 4 -// CHECK13-NEXT: store i32 [[TMP63]], i32* [[DOTCAPTURE_EXPR_18]], align 4 -// CHECK13-NEXT: [[TMP64:%.*]] = load i32, i32* [[N]], align 4 -// CHECK13-NEXT: [[CONV20:%.*]] = bitcast i64* [[N_CASTED19]] to i32* -// CHECK13-NEXT: store i32 [[TMP64]], i32* [[CONV20]], align 4 -// CHECK13-NEXT: [[TMP65:%.*]] = load i64, i64* [[N_CASTED19]], align 8 -// CHECK13-NEXT: [[TMP66:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_18]], align 4 -// CHECK13-NEXT: [[CONV21:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* -// CHECK13-NEXT: store i32 [[TMP66]], i32* [[CONV21]], align 4 -// CHECK13-NEXT: [[TMP67:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK13-NEXT: [[TMP68:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK13-NEXT: [[TMP69:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP70:%.*]] = bitcast i8** [[TMP69]] to i64* -// CHECK13-NEXT: store i64 [[TMP65]], i64* [[TMP70]], align 8 -// CHECK13-NEXT: [[TMP71:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP72:%.*]] = bitcast i8** [[TMP71]] to i64* -// CHECK13-NEXT: store i64 [[TMP65]], i64* [[TMP72]], align 8 -// CHECK13-NEXT: [[TMP73:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 0 -// CHECK13-NEXT: store i64 4, i64* [[TMP73]], align 8 -// CHECK13-NEXT: [[TMP74:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 0 -// CHECK13-NEXT: store i8* null, i8** [[TMP74]], align 8 -// CHECK13-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 1 -// CHECK13-NEXT: [[TMP76:%.*]] = bitcast i8** [[TMP75]] to i64* -// CHECK13-NEXT: store i64 [[TMP1]], i64* [[TMP76]], align 8 -// CHECK13-NEXT: [[TMP77:%.*]] 
= getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 1 -// CHECK13-NEXT: [[TMP78:%.*]] = bitcast i8** [[TMP77]] to i64* -// CHECK13-NEXT: store i64 [[TMP1]], i64* [[TMP78]], align 8 -// CHECK13-NEXT: [[TMP79:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 1 -// CHECK13-NEXT: store i64 8, i64* [[TMP79]], align 8 -// CHECK13-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 1 +// CHECK13-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 2 +// CHECK13-NEXT: [[TMP43:%.*]] = bitcast i8** [[TMP42]] to i32** +// CHECK13-NEXT: store i32* [[VLA]], i32** [[TMP43]], align 8 +// CHECK13-NEXT: [[TMP44:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 2 +// CHECK13-NEXT: [[TMP45:%.*]] = bitcast i8** [[TMP44]] to i32** +// CHECK13-NEXT: store i32* [[VLA]], i32** [[TMP45]], align 8 +// CHECK13-NEXT: store i64 [[TMP31]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 2), align 8 +// CHECK13-NEXT: [[TMP46:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 2 +// CHECK13-NEXT: store i8* null, i8** [[TMP46]], align 8 +// CHECK13-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 +// CHECK13-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 +// CHECK13-NEXT: [[TMP49:%.*]] = load i32, i32* [[N]], align 4 +// CHECK13-NEXT: store i32 [[TMP49]], i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK13-NEXT: [[TMP50:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK13-NEXT: [[SUB11:%.*]] = sub nsw i32 [[TMP50]], 0 +// CHECK13-NEXT: [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1 +// CHECK13-NEXT: [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1 +// CHECK13-NEXT: store i32 [[SUB13]], i32* [[DOTCAPTURE_EXPR_10]], align 4 +// CHECK13-NEXT: [[TMP51:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 +// CHECK13-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP51]], 1 +// CHECK13-NEXT: [[TMP52:%.*]] = zext i32 [[ADD14]] to i64 +// CHECK13-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP52]]) +// CHECK13-NEXT: [[TMP53:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143.region_id, i32 3, i8** [[TMP47]], i8** [[TMP48]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK13-NEXT: [[TMP54:%.*]] = icmp ne i32 [[TMP53]], 0 +// CHECK13-NEXT: br i1 [[TMP54]], label [[OMP_OFFLOAD_FAILED15:%.*]], label [[OMP_OFFLOAD_CONT16:%.*]] +// CHECK13: omp_offload.failed15: +// CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143(i64 [[TMP30]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] +// CHECK13-NEXT: br label [[OMP_OFFLOAD_CONT16]] +// CHECK13: omp_offload.cont16: +// CHECK13-NEXT: [[TMP55:%.*]] = load i32, i32* [[M]], align 4 +// CHECK13-NEXT: store i32 [[TMP55]], i32* [[DOTCAPTURE_EXPR_17]], align 4 +// CHECK13-NEXT: [[TMP56:%.*]] = load i32, i32* [[N]], align 4 +// CHECK13-NEXT: [[CONV19:%.*]] = bitcast i64* [[N_CASTED18]] to i32* +// CHECK13-NEXT: store i32 [[TMP56]], i32* [[CONV19]], align 4 +// CHECK13-NEXT: [[TMP57:%.*]] = load i64, i64* 
[[N_CASTED18]], align 8 +// CHECK13-NEXT: [[TMP58:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_17]], align 4 +// CHECK13-NEXT: [[CONV20:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* +// CHECK13-NEXT: store i32 [[TMP58]], i32* [[CONV20]], align 4 +// CHECK13-NEXT: [[TMP59:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 +// CHECK13-NEXT: [[TMP60:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK13-NEXT: [[TMP61:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0 +// CHECK13-NEXT: [[TMP62:%.*]] = bitcast i8** [[TMP61]] to i64* +// CHECK13-NEXT: store i64 [[TMP57]], i64* [[TMP62]], align 8 +// CHECK13-NEXT: [[TMP63:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0 +// CHECK13-NEXT: [[TMP64:%.*]] = bitcast i8** [[TMP63]] to i64* +// CHECK13-NEXT: store i64 [[TMP57]], i64* [[TMP64]], align 8 +// CHECK13-NEXT: [[TMP65:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 0 +// CHECK13-NEXT: store i8* null, i8** [[TMP65]], align 8 +// CHECK13-NEXT: [[TMP66:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 1 +// CHECK13-NEXT: [[TMP67:%.*]] = bitcast i8** [[TMP66]] to i64* +// CHECK13-NEXT: store i64 [[TMP1]], i64* [[TMP67]], align 8 +// CHECK13-NEXT: [[TMP68:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 1 +// CHECK13-NEXT: [[TMP69:%.*]] = bitcast i8** [[TMP68]] to i64* +// CHECK13-NEXT: store i64 [[TMP1]], i64* [[TMP69]], align 8 +// CHECK13-NEXT: [[TMP70:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 1 +// CHECK13-NEXT: store i8* null, i8** [[TMP70]], align 8 +// CHECK13-NEXT: [[TMP71:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 2 +// CHECK13-NEXT: [[TMP72:%.*]] = bitcast i8** [[TMP71]] to i32** +// CHECK13-NEXT: store i32* [[VLA]], i32** [[TMP72]], align 8 +// CHECK13-NEXT: [[TMP73:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 2 +// CHECK13-NEXT: [[TMP74:%.*]] = bitcast i8** [[TMP73]] to i32** +// CHECK13-NEXT: store i32* [[VLA]], i32** [[TMP74]], align 8 +// CHECK13-NEXT: store i64 [[TMP60]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 2), align 8 +// CHECK13-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 2 +// CHECK13-NEXT: store i8* null, i8** [[TMP75]], align 8 +// CHECK13-NEXT: [[TMP76:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 3 +// CHECK13-NEXT: [[TMP77:%.*]] = bitcast i8** [[TMP76]] to i64* +// CHECK13-NEXT: store i64 [[TMP59]], i64* [[TMP77]], align 8 +// CHECK13-NEXT: [[TMP78:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 3 +// CHECK13-NEXT: [[TMP79:%.*]] = bitcast i8** [[TMP78]] to i64* +// CHECK13-NEXT: store i64 [[TMP59]], i64* [[TMP79]], align 8 +// CHECK13-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 3 // CHECK13-NEXT: store i8* null, i8** [[TMP80]], align 8 -// CHECK13-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 2 -// CHECK13-NEXT: [[TMP82:%.*]] = bitcast i8** [[TMP81]] to i32** -// CHECK13-NEXT: store i32* [[VLA]], i32** [[TMP82]], align 8 -// CHECK13-NEXT: [[TMP83:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 2 
-// CHECK13-NEXT: [[TMP84:%.*]] = bitcast i8** [[TMP83]] to i32** -// CHECK13-NEXT: store i32* [[VLA]], i32** [[TMP84]], align 8 -// CHECK13-NEXT: [[TMP85:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 2 -// CHECK13-NEXT: store i64 [[TMP68]], i64* [[TMP85]], align 8 -// CHECK13-NEXT: [[TMP86:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 2 -// CHECK13-NEXT: store i8* null, i8** [[TMP86]], align 8 -// CHECK13-NEXT: [[TMP87:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 3 -// CHECK13-NEXT: [[TMP88:%.*]] = bitcast i8** [[TMP87]] to i64* -// CHECK13-NEXT: store i64 [[TMP67]], i64* [[TMP88]], align 8 -// CHECK13-NEXT: [[TMP89:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 3 -// CHECK13-NEXT: [[TMP90:%.*]] = bitcast i8** [[TMP89]] to i64* -// CHECK13-NEXT: store i64 [[TMP67]], i64* [[TMP90]], align 8 -// CHECK13-NEXT: [[TMP91:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 3 -// CHECK13-NEXT: store i64 4, i64* [[TMP91]], align 8 -// CHECK13-NEXT: [[TMP92:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 3 -// CHECK13-NEXT: store i8* null, i8** [[TMP92]], align 8 -// CHECK13-NEXT: [[TMP93:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP94:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP95:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP96:%.*]] = load i32, i32* [[N]], align 4 -// CHECK13-NEXT: store i32 [[TMP96]], i32* [[DOTCAPTURE_EXPR_27]], align 4 -// CHECK13-NEXT: [[TMP97:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_27]], align 4 -// CHECK13-NEXT: [[SUB29:%.*]] = sub nsw i32 [[TMP97]], 0 -// CHECK13-NEXT: [[DIV30:%.*]] = sdiv i32 [[SUB29]], 1 -// CHECK13-NEXT: [[SUB31:%.*]] = sub nsw i32 [[DIV30]], 1 -// CHECK13-NEXT: store i32 [[SUB31]], i32* [[DOTCAPTURE_EXPR_28]], align 4 -// CHECK13-NEXT: [[TMP98:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_28]], align 4 -// CHECK13-NEXT: [[ADD32:%.*]] = add nsw i32 [[TMP98]], 1 -// CHECK13-NEXT: [[TMP99:%.*]] = zext i32 [[ADD32]] to i64 -// CHECK13-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP99]]) -// CHECK13-NEXT: [[TMP100:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147.region_id, i32 4, i8** [[TMP93]], i8** [[TMP94]], i64* [[TMP95]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.7, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK13-NEXT: [[TMP101:%.*]] = icmp ne i32 [[TMP100]], 0 -// CHECK13-NEXT: br i1 [[TMP101]], label [[OMP_OFFLOAD_FAILED33:%.*]], label [[OMP_OFFLOAD_CONT34:%.*]] -// CHECK13: omp_offload.failed33: -// CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147(i64 [[TMP65]], i64 [[TMP1]], i32* [[VLA]], i64 [[TMP67]]) #[[ATTR3]] -// CHECK13-NEXT: br label [[OMP_OFFLOAD_CONT34]] -// CHECK13: omp_offload.cont34: -// CHECK13-NEXT: [[TMP102:%.*]] = load i32, i32* [[N]], align 4 -// CHECK13-NEXT: [[CONV36:%.*]] = bitcast i64* [[N_CASTED35]] to i32* -// CHECK13-NEXT: store i32 [[TMP102]], i32* [[CONV36]], align 4 -// CHECK13-NEXT: [[TMP103:%.*]] = load i64, i64* [[N_CASTED35]], align 8 -// CHECK13-NEXT: [[TMP104:%.*]] 
= mul nuw i64 [[TMP1]], 4 -// CHECK13-NEXT: [[TMP105:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS37]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP106:%.*]] = bitcast i8** [[TMP105]] to i64* -// CHECK13-NEXT: store i64 [[TMP103]], i64* [[TMP106]], align 8 -// CHECK13-NEXT: [[TMP107:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS38]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP108:%.*]] = bitcast i8** [[TMP107]] to i64* -// CHECK13-NEXT: store i64 [[TMP103]], i64* [[TMP108]], align 8 -// CHECK13-NEXT: [[TMP109:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES40]], i32 0, i32 0 -// CHECK13-NEXT: store i64 4, i64* [[TMP109]], align 8 -// CHECK13-NEXT: [[TMP110:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS39]], i64 0, i64 0 -// CHECK13-NEXT: store i8* null, i8** [[TMP110]], align 8 -// CHECK13-NEXT: [[TMP111:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS37]], i32 0, i32 1 -// CHECK13-NEXT: [[TMP112:%.*]] = bitcast i8** [[TMP111]] to i64* -// CHECK13-NEXT: store i64 [[TMP1]], i64* [[TMP112]], align 8 -// CHECK13-NEXT: [[TMP113:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS38]], i32 0, i32 1 -// CHECK13-NEXT: [[TMP114:%.*]] = bitcast i8** [[TMP113]] to i64* -// CHECK13-NEXT: store i64 [[TMP1]], i64* [[TMP114]], align 8 -// CHECK13-NEXT: [[TMP115:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES40]], i32 0, i32 1 -// CHECK13-NEXT: store i64 8, i64* [[TMP115]], align 8 -// CHECK13-NEXT: [[TMP116:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS39]], i64 0, i64 1 -// CHECK13-NEXT: store i8* null, i8** [[TMP116]], align 8 -// CHECK13-NEXT: [[TMP117:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS37]], i32 0, i32 2 -// CHECK13-NEXT: [[TMP118:%.*]] = bitcast i8** [[TMP117]] to i32** -// CHECK13-NEXT: store i32* [[VLA]], i32** [[TMP118]], align 8 -// CHECK13-NEXT: [[TMP119:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS38]], i32 0, i32 2 -// CHECK13-NEXT: [[TMP120:%.*]] = bitcast i8** [[TMP119]] to i32** -// CHECK13-NEXT: store i32* [[VLA]], i32** [[TMP120]], align 8 -// CHECK13-NEXT: [[TMP121:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES40]], i32 0, i32 2 -// CHECK13-NEXT: store i64 [[TMP104]], i64* [[TMP121]], align 8 -// CHECK13-NEXT: [[TMP122:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS39]], i64 0, i64 2 -// CHECK13-NEXT: store i8* null, i8** [[TMP122]], align 8 -// CHECK13-NEXT: [[TMP123:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS37]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP124:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS38]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP125:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES40]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP126:%.*]] = load i32, i32* [[N]], align 4 -// CHECK13-NEXT: store i32 [[TMP126]], i32* [[DOTCAPTURE_EXPR_42]], align 4 -// CHECK13-NEXT: [[TMP127:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_42]], align 4 -// CHECK13-NEXT: [[SUB44:%.*]] = sub nsw i32 [[TMP127]], 0 -// CHECK13-NEXT: [[DIV45:%.*]] = sdiv i32 [[SUB44]], 1 -// CHECK13-NEXT: [[SUB46:%.*]] = sub nsw i32 [[DIV45]], 1 -// CHECK13-NEXT: store i32 [[SUB46]], i32* [[DOTCAPTURE_EXPR_43]], align 4 -// CHECK13-NEXT: [[TMP128:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_43]], align 4 -// CHECK13-NEXT: [[ADD47:%.*]] = add nsw i32 [[TMP128]], 1 
-// CHECK13-NEXT: [[TMP129:%.*]] = zext i32 [[ADD47]] to i64 -// CHECK13-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP129]]) -// CHECK13-NEXT: [[TMP130:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151.region_id, i32 3, i8** [[TMP123]], i8** [[TMP124]], i64* [[TMP125]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK13-NEXT: [[TMP131:%.*]] = icmp ne i32 [[TMP130]], 0 -// CHECK13-NEXT: br i1 [[TMP131]], label [[OMP_OFFLOAD_FAILED48:%.*]], label [[OMP_OFFLOAD_CONT49:%.*]] -// CHECK13: omp_offload.failed48: -// CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151(i64 [[TMP103]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] -// CHECK13-NEXT: br label [[OMP_OFFLOAD_CONT49]] -// CHECK13: omp_offload.cont49: -// CHECK13-NEXT: [[TMP132:%.*]] = load i32, i32* [[M]], align 4 -// CHECK13-NEXT: store i32 [[TMP132]], i32* [[DOTCAPTURE_EXPR_50]], align 4 -// CHECK13-NEXT: [[TMP133:%.*]] = load i32, i32* [[N]], align 4 -// CHECK13-NEXT: [[CONV52:%.*]] = bitcast i64* [[N_CASTED51]] to i32* -// CHECK13-NEXT: store i32 [[TMP133]], i32* [[CONV52]], align 4 -// CHECK13-NEXT: [[TMP134:%.*]] = load i64, i64* [[N_CASTED51]], align 8 -// CHECK13-NEXT: [[TMP135:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_50]], align 4 -// CHECK13-NEXT: [[CONV54:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED53]] to i32* -// CHECK13-NEXT: store i32 [[TMP135]], i32* [[CONV54]], align 4 -// CHECK13-NEXT: [[TMP136:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED53]], align 8 -// CHECK13-NEXT: [[TMP137:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK13-NEXT: [[TMP138:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS55]], i32 0, i32 0 +// CHECK13-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0 +// CHECK13-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0 +// CHECK13-NEXT: [[TMP83:%.*]] = load i32, i32* [[N]], align 4 +// CHECK13-NEXT: store i32 [[TMP83]], i32* [[DOTCAPTURE_EXPR_25]], align 4 +// CHECK13-NEXT: [[TMP84:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_25]], align 4 +// CHECK13-NEXT: [[SUB27:%.*]] = sub nsw i32 [[TMP84]], 0 +// CHECK13-NEXT: [[DIV28:%.*]] = sdiv i32 [[SUB27]], 1 +// CHECK13-NEXT: [[SUB29:%.*]] = sub nsw i32 [[DIV28]], 1 +// CHECK13-NEXT: store i32 [[SUB29]], i32* [[DOTCAPTURE_EXPR_26]], align 4 +// CHECK13-NEXT: [[TMP85:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_26]], align 4 +// CHECK13-NEXT: [[ADD30:%.*]] = add nsw i32 [[TMP85]], 1 +// CHECK13-NEXT: [[TMP86:%.*]] = zext i32 [[ADD30]] to i64 +// CHECK13-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP86]]) +// CHECK13-NEXT: [[TMP87:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147.region_id, i32 4, i8** [[TMP81]], i8** [[TMP82]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK13-NEXT: [[TMP88:%.*]] = icmp ne i32 [[TMP87]], 0 +// CHECK13-NEXT: br i1 [[TMP88]], label [[OMP_OFFLOAD_FAILED31:%.*]], label [[OMP_OFFLOAD_CONT32:%.*]] +// CHECK13: omp_offload.failed31: +// CHECK13-NEXT: call void 
@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147(i64 [[TMP57]], i64 [[TMP1]], i32* [[VLA]], i64 [[TMP59]]) #[[ATTR3]] +// CHECK13-NEXT: br label [[OMP_OFFLOAD_CONT32]] +// CHECK13: omp_offload.cont32: +// CHECK13-NEXT: [[TMP89:%.*]] = load i32, i32* [[N]], align 4 +// CHECK13-NEXT: [[CONV34:%.*]] = bitcast i64* [[N_CASTED33]] to i32* +// CHECK13-NEXT: store i32 [[TMP89]], i32* [[CONV34]], align 4 +// CHECK13-NEXT: [[TMP90:%.*]] = load i64, i64* [[N_CASTED33]], align 8 +// CHECK13-NEXT: [[TMP91:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK13-NEXT: [[TMP92:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS35]], i32 0, i32 0 +// CHECK13-NEXT: [[TMP93:%.*]] = bitcast i8** [[TMP92]] to i64* +// CHECK13-NEXT: store i64 [[TMP90]], i64* [[TMP93]], align 8 +// CHECK13-NEXT: [[TMP94:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS36]], i32 0, i32 0 +// CHECK13-NEXT: [[TMP95:%.*]] = bitcast i8** [[TMP94]] to i64* +// CHECK13-NEXT: store i64 [[TMP90]], i64* [[TMP95]], align 8 +// CHECK13-NEXT: [[TMP96:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS37]], i64 0, i64 0 +// CHECK13-NEXT: store i8* null, i8** [[TMP96]], align 8 +// CHECK13-NEXT: [[TMP97:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS35]], i32 0, i32 1 +// CHECK13-NEXT: [[TMP98:%.*]] = bitcast i8** [[TMP97]] to i64* +// CHECK13-NEXT: store i64 [[TMP1]], i64* [[TMP98]], align 8 +// CHECK13-NEXT: [[TMP99:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS36]], i32 0, i32 1 +// CHECK13-NEXT: [[TMP100:%.*]] = bitcast i8** [[TMP99]] to i64* +// CHECK13-NEXT: store i64 [[TMP1]], i64* [[TMP100]], align 8 +// CHECK13-NEXT: [[TMP101:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS37]], i64 0, i64 1 +// CHECK13-NEXT: store i8* null, i8** [[TMP101]], align 8 +// CHECK13-NEXT: [[TMP102:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS35]], i32 0, i32 2 +// CHECK13-NEXT: [[TMP103:%.*]] = bitcast i8** [[TMP102]] to i32** +// CHECK13-NEXT: store i32* [[VLA]], i32** [[TMP103]], align 8 +// CHECK13-NEXT: [[TMP104:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS36]], i32 0, i32 2 +// CHECK13-NEXT: [[TMP105:%.*]] = bitcast i8** [[TMP104]] to i32** +// CHECK13-NEXT: store i32* [[VLA]], i32** [[TMP105]], align 8 +// CHECK13-NEXT: store i64 [[TMP91]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.12, i32 0, i32 2), align 8 +// CHECK13-NEXT: [[TMP106:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS37]], i64 0, i64 2 +// CHECK13-NEXT: store i8* null, i8** [[TMP106]], align 8 +// CHECK13-NEXT: [[TMP107:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS35]], i32 0, i32 0 +// CHECK13-NEXT: [[TMP108:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS36]], i32 0, i32 0 +// CHECK13-NEXT: [[TMP109:%.*]] = load i32, i32* [[N]], align 4 +// CHECK13-NEXT: store i32 [[TMP109]], i32* [[DOTCAPTURE_EXPR_39]], align 4 +// CHECK13-NEXT: [[TMP110:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_39]], align 4 +// CHECK13-NEXT: [[SUB41:%.*]] = sub nsw i32 [[TMP110]], 0 +// CHECK13-NEXT: [[DIV42:%.*]] = sdiv i32 [[SUB41]], 1 +// CHECK13-NEXT: [[SUB43:%.*]] = sub nsw i32 [[DIV42]], 1 +// CHECK13-NEXT: store i32 [[SUB43]], i32* [[DOTCAPTURE_EXPR_40]], align 4 +// CHECK13-NEXT: [[TMP111:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_40]], align 4 +// CHECK13-NEXT: [[ADD44:%.*]] = add nsw i32 [[TMP111]], 1 +// CHECK13-NEXT: 
[[TMP112:%.*]] = zext i32 [[ADD44]] to i64 +// CHECK13-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP112]]) +// CHECK13-NEXT: [[TMP113:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151.region_id, i32 3, i8** [[TMP107]], i8** [[TMP108]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK13-NEXT: [[TMP114:%.*]] = icmp ne i32 [[TMP113]], 0 +// CHECK13-NEXT: br i1 [[TMP114]], label [[OMP_OFFLOAD_FAILED45:%.*]], label [[OMP_OFFLOAD_CONT46:%.*]] +// CHECK13: omp_offload.failed45: +// CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151(i64 [[TMP90]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] +// CHECK13-NEXT: br label [[OMP_OFFLOAD_CONT46]] +// CHECK13: omp_offload.cont46: +// CHECK13-NEXT: [[TMP115:%.*]] = load i32, i32* [[M]], align 4 +// CHECK13-NEXT: store i32 [[TMP115]], i32* [[DOTCAPTURE_EXPR_47]], align 4 +// CHECK13-NEXT: [[TMP116:%.*]] = load i32, i32* [[N]], align 4 +// CHECK13-NEXT: [[CONV49:%.*]] = bitcast i64* [[N_CASTED48]] to i32* +// CHECK13-NEXT: store i32 [[TMP116]], i32* [[CONV49]], align 4 +// CHECK13-NEXT: [[TMP117:%.*]] = load i64, i64* [[N_CASTED48]], align 8 +// CHECK13-NEXT: [[TMP118:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_47]], align 4 +// CHECK13-NEXT: [[CONV51:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED50]] to i32* +// CHECK13-NEXT: store i32 [[TMP118]], i32* [[CONV51]], align 4 +// CHECK13-NEXT: [[TMP119:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED50]], align 8 +// CHECK13-NEXT: [[TMP120:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK13-NEXT: [[TMP121:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS52]], i32 0, i32 0 +// CHECK13-NEXT: [[TMP122:%.*]] = bitcast i8** [[TMP121]] to i64* +// CHECK13-NEXT: store i64 [[TMP117]], i64* [[TMP122]], align 8 +// CHECK13-NEXT: [[TMP123:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS53]], i32 0, i32 0 +// CHECK13-NEXT: [[TMP124:%.*]] = bitcast i8** [[TMP123]] to i64* +// CHECK13-NEXT: store i64 [[TMP117]], i64* [[TMP124]], align 8 +// CHECK13-NEXT: [[TMP125:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS54]], i64 0, i64 0 +// CHECK13-NEXT: store i8* null, i8** [[TMP125]], align 8 +// CHECK13-NEXT: [[TMP126:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS52]], i32 0, i32 1 +// CHECK13-NEXT: [[TMP127:%.*]] = bitcast i8** [[TMP126]] to i64* +// CHECK13-NEXT: store i64 [[TMP1]], i64* [[TMP127]], align 8 +// CHECK13-NEXT: [[TMP128:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS53]], i32 0, i32 1 +// CHECK13-NEXT: [[TMP129:%.*]] = bitcast i8** [[TMP128]] to i64* +// CHECK13-NEXT: store i64 [[TMP1]], i64* [[TMP129]], align 8 +// CHECK13-NEXT: [[TMP130:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS54]], i64 0, i64 1 +// CHECK13-NEXT: store i8* null, i8** [[TMP130]], align 8 +// CHECK13-NEXT: [[TMP131:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS52]], i32 0, i32 2 +// CHECK13-NEXT: [[TMP132:%.*]] = bitcast i8** [[TMP131]] to i32** +// CHECK13-NEXT: store i32* [[VLA]], i32** [[TMP132]], align 8 +// CHECK13-NEXT: [[TMP133:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS53]], i32 0, i32 2 +// CHECK13-NEXT: 
[[TMP134:%.*]] = bitcast i8** [[TMP133]] to i32** +// CHECK13-NEXT: store i32* [[VLA]], i32** [[TMP134]], align 8 +// CHECK13-NEXT: store i64 [[TMP120]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.16, i32 0, i32 2), align 8 +// CHECK13-NEXT: [[TMP135:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS54]], i64 0, i64 2 +// CHECK13-NEXT: store i8* null, i8** [[TMP135]], align 8 +// CHECK13-NEXT: [[TMP136:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS52]], i32 0, i32 3 +// CHECK13-NEXT: [[TMP137:%.*]] = bitcast i8** [[TMP136]] to i64* +// CHECK13-NEXT: store i64 [[TMP119]], i64* [[TMP137]], align 8 +// CHECK13-NEXT: [[TMP138:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS53]], i32 0, i32 3 // CHECK13-NEXT: [[TMP139:%.*]] = bitcast i8** [[TMP138]] to i64* -// CHECK13-NEXT: store i64 [[TMP134]], i64* [[TMP139]], align 8 -// CHECK13-NEXT: [[TMP140:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS56]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP141:%.*]] = bitcast i8** [[TMP140]] to i64* -// CHECK13-NEXT: store i64 [[TMP134]], i64* [[TMP141]], align 8 -// CHECK13-NEXT: [[TMP142:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES58]], i32 0, i32 0 -// CHECK13-NEXT: store i64 4, i64* [[TMP142]], align 8 -// CHECK13-NEXT: [[TMP143:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS57]], i64 0, i64 0 -// CHECK13-NEXT: store i8* null, i8** [[TMP143]], align 8 -// CHECK13-NEXT: [[TMP144:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS55]], i32 0, i32 1 -// CHECK13-NEXT: [[TMP145:%.*]] = bitcast i8** [[TMP144]] to i64* -// CHECK13-NEXT: store i64 [[TMP1]], i64* [[TMP145]], align 8 -// CHECK13-NEXT: [[TMP146:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS56]], i32 0, i32 1 -// CHECK13-NEXT: [[TMP147:%.*]] = bitcast i8** [[TMP146]] to i64* -// CHECK13-NEXT: store i64 [[TMP1]], i64* [[TMP147]], align 8 -// CHECK13-NEXT: [[TMP148:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES58]], i32 0, i32 1 -// CHECK13-NEXT: store i64 8, i64* [[TMP148]], align 8 -// CHECK13-NEXT: [[TMP149:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS57]], i64 0, i64 1 -// CHECK13-NEXT: store i8* null, i8** [[TMP149]], align 8 -// CHECK13-NEXT: [[TMP150:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS55]], i32 0, i32 2 -// CHECK13-NEXT: [[TMP151:%.*]] = bitcast i8** [[TMP150]] to i32** -// CHECK13-NEXT: store i32* [[VLA]], i32** [[TMP151]], align 8 -// CHECK13-NEXT: [[TMP152:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS56]], i32 0, i32 2 -// CHECK13-NEXT: [[TMP153:%.*]] = bitcast i8** [[TMP152]] to i32** -// CHECK13-NEXT: store i32* [[VLA]], i32** [[TMP153]], align 8 -// CHECK13-NEXT: [[TMP154:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES58]], i32 0, i32 2 -// CHECK13-NEXT: store i64 [[TMP137]], i64* [[TMP154]], align 8 -// CHECK13-NEXT: [[TMP155:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS57]], i64 0, i64 2 -// CHECK13-NEXT: store i8* null, i8** [[TMP155]], align 8 -// CHECK13-NEXT: [[TMP156:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS55]], i32 0, i32 3 -// CHECK13-NEXT: [[TMP157:%.*]] = bitcast i8** [[TMP156]] to i64* -// CHECK13-NEXT: store i64 [[TMP136]], i64* [[TMP157]], align 8 -// CHECK13-NEXT: [[TMP158:%.*]] = getelementptr inbounds [4 x i8*], [4 
x i8*]* [[DOTOFFLOAD_PTRS56]], i32 0, i32 3 -// CHECK13-NEXT: [[TMP159:%.*]] = bitcast i8** [[TMP158]] to i64* -// CHECK13-NEXT: store i64 [[TMP136]], i64* [[TMP159]], align 8 -// CHECK13-NEXT: [[TMP160:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES58]], i32 0, i32 3 -// CHECK13-NEXT: store i64 4, i64* [[TMP160]], align 8 -// CHECK13-NEXT: [[TMP161:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS57]], i64 0, i64 3 -// CHECK13-NEXT: store i8* null, i8** [[TMP161]], align 8 -// CHECK13-NEXT: [[TMP162:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS55]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP163:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS56]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP164:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES58]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP165:%.*]] = load i32, i32* [[N]], align 4 -// CHECK13-NEXT: store i32 [[TMP165]], i32* [[DOTCAPTURE_EXPR_60]], align 4 -// CHECK13-NEXT: [[TMP166:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_60]], align 4 -// CHECK13-NEXT: [[SUB62:%.*]] = sub nsw i32 [[TMP166]], 0 -// CHECK13-NEXT: [[DIV63:%.*]] = sdiv i32 [[SUB62]], 1 -// CHECK13-NEXT: [[SUB64:%.*]] = sub nsw i32 [[DIV63]], 1 -// CHECK13-NEXT: store i32 [[SUB64]], i32* [[DOTCAPTURE_EXPR_61]], align 4 -// CHECK13-NEXT: [[TMP167:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_61]], align 4 -// CHECK13-NEXT: [[ADD65:%.*]] = add nsw i32 [[TMP167]], 1 -// CHECK13-NEXT: [[TMP168:%.*]] = zext i32 [[ADD65]] to i64 -// CHECK13-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP168]]) -// CHECK13-NEXT: [[TMP169:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155.region_id, i32 4, i8** [[TMP162]], i8** [[TMP163]], i64* [[TMP164]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK13-NEXT: [[TMP170:%.*]] = icmp ne i32 [[TMP169]], 0 -// CHECK13-NEXT: br i1 [[TMP170]], label [[OMP_OFFLOAD_FAILED66:%.*]], label [[OMP_OFFLOAD_CONT67:%.*]] -// CHECK13: omp_offload.failed66: -// CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155(i64 [[TMP134]], i64 [[TMP1]], i32* [[VLA]], i64 [[TMP136]]) #[[ATTR3]] -// CHECK13-NEXT: br label [[OMP_OFFLOAD_CONT67]] -// CHECK13: omp_offload.cont67: -// CHECK13-NEXT: [[TMP171:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 -// CHECK13-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP171]]) +// CHECK13-NEXT: store i64 [[TMP119]], i64* [[TMP139]], align 8 +// CHECK13-NEXT: [[TMP140:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS54]], i64 0, i64 3 +// CHECK13-NEXT: store i8* null, i8** [[TMP140]], align 8 +// CHECK13-NEXT: [[TMP141:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS52]], i32 0, i32 0 +// CHECK13-NEXT: [[TMP142:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS53]], i32 0, i32 0 +// CHECK13-NEXT: [[TMP143:%.*]] = load i32, i32* [[N]], align 4 +// CHECK13-NEXT: store i32 [[TMP143]], i32* [[DOTCAPTURE_EXPR_56]], align 4 +// CHECK13-NEXT: [[TMP144:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_56]], align 4 +// CHECK13-NEXT: [[SUB58:%.*]] = sub nsw i32 [[TMP144]], 0 +// CHECK13-NEXT: [[DIV59:%.*]] = sdiv i32 [[SUB58]], 1 +// CHECK13-NEXT: [[SUB60:%.*]] = sub nsw i32 [[DIV59]], 1 +// CHECK13-NEXT: 
store i32 [[SUB60]], i32* [[DOTCAPTURE_EXPR_57]], align 4 +// CHECK13-NEXT: [[TMP145:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_57]], align 4 +// CHECK13-NEXT: [[ADD61:%.*]] = add nsw i32 [[TMP145]], 1 +// CHECK13-NEXT: [[TMP146:%.*]] = zext i32 [[ADD61]] to i64 +// CHECK13-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP146]]) +// CHECK13-NEXT: [[TMP147:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155.region_id, i32 4, i8** [[TMP141]], i8** [[TMP142]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.16, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK13-NEXT: [[TMP148:%.*]] = icmp ne i32 [[TMP147]], 0 +// CHECK13-NEXT: br i1 [[TMP148]], label [[OMP_OFFLOAD_FAILED62:%.*]], label [[OMP_OFFLOAD_CONT63:%.*]] +// CHECK13: omp_offload.failed62: +// CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155(i64 [[TMP117]], i64 [[TMP1]], i32* [[VLA]], i64 [[TMP119]]) #[[ATTR3]] +// CHECK13-NEXT: br label [[OMP_OFFLOAD_CONT63]] +// CHECK13: omp_offload.cont63: +// CHECK13-NEXT: [[TMP149:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 +// CHECK13-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP149]]) // CHECK13-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK13-NEXT: [[TMP172:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK13-NEXT: call void @llvm.stackrestore(i8* [[TMP172]]) -// CHECK13-NEXT: [[TMP173:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK13-NEXT: ret i32 [[TMP173]] +// CHECK13-NEXT: [[TMP150:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK13-NEXT: call void @llvm.stackrestore(i8* [[TMP150]]) +// CHECK13-NEXT: [[TMP151:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK13-NEXT: ret i32 [[TMP151]] // // // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139 @@ -8253,11 +8214,11 @@ // CHECK13-NEXT: [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK13-NEXT: store i32 [[TMP4]], i32* [[CONV3]], align 4 // CHECK13-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP5]]) +// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP5]]) // CHECK13-NEXT: ret void // // -// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..5 +// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..6 // CHECK13-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK13-NEXT: entry: // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -8344,7 +8305,7 @@ // CHECK13-NEXT: [[CONV9:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK13-NEXT: store i32 [[TMP22]], i32* [[CONV9]], align 4 // CHECK13-NEXT: [[TMP23:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*, i64)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i64 [[TMP17]], i64 [[TMP19]], i64 [[TMP21]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP23]]) +// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*, i64)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i64 [[TMP17]], i64 [[TMP19]], i64 [[TMP21]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP23]]) // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK13: omp.inner.for.inc: // CHECK13-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -8386,7 +8347,7 @@ // CHECK13-NEXT: ret void // // -// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..6 +// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..7 // CHECK13-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK13-NEXT: entry: // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -8511,11 +8472,11 @@ // CHECK13-NEXT: [[CONV1:%.*]] = bitcast i64* [[N_CASTED]] to i32* // CHECK13-NEXT: store i32 [[TMP2]], i32* [[CONV1]], align 4 // CHECK13-NEXT: [[TMP3:%.*]] = load i64, i64* [[N_CASTED]], align 8 -// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*)* @.omp_outlined..8 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]]) +// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]]) // CHECK13-NEXT: ret void // // -// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..8 +// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..10 // CHECK13-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] { // CHECK13-NEXT: entry: // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -8592,7 +8553,7 @@ // CHECK13-NEXT: [[CONV6:%.*]] = bitcast i64* [[N_CASTED]] to i32* // CHECK13-NEXT: store i32 [[TMP19]], i32* [[CONV6]], align 4 // CHECK13-NEXT: [[TMP20:%.*]] = load i64, i64* [[N_CASTED]], align 8 -// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), i64 [[TMP16]], i64 [[TMP18]], i64 [[TMP20]], i64 [[TMP0]], i32* [[TMP1]]) +// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP16]], i64 [[TMP18]], i64 [[TMP20]], i64 [[TMP0]], i32* [[TMP1]]) // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK13: omp.inner.for.inc: // CHECK13-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -8611,7 +8572,7 @@ // CHECK13-NEXT: ret void // // -// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..9 +// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..11 // CHECK13-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] { // CHECK13-NEXT: entry: // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -8737,11 +8698,11 @@ // CHECK13-NEXT: [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK13-NEXT: store i32 [[TMP4]], i32* [[CONV3]], align 4 // CHECK13-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP5]]) +// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP5]]) // CHECK13-NEXT: ret void // // -// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..14 // CHECK13-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK13-NEXT: entry: // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -8826,7 +8787,7 @@ // CHECK13-NEXT: [[CONV9:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK13-NEXT: store i32 [[TMP21]], i32* [[CONV9]], align 4 // CHECK13-NEXT: [[TMP22:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*, i64)* @.omp_outlined..12 to void (i32*, i32*, ...)*), i64 [[TMP16]], i64 [[TMP18]], i64 [[TMP20]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP22]]) +// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*, i64)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i64 [[TMP16]], i64 [[TMP18]], i64 [[TMP20]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP22]]) // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK13: omp.inner.for.inc: // CHECK13-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -8845,7 +8806,7 @@ // CHECK13-NEXT: ret void // // -// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..12 +// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..15 // CHECK13-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK13-NEXT: entry: // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -8993,7 +8954,7 @@ // CHECK13-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK13-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK13-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK13-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l112.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK13-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l112.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.20, i32 0, i32 0), i64* 
getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.21, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK13-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK13-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK13: omp_offload.failed: @@ -9011,7 +8972,7 @@ // CHECK13-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0 // CHECK13-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0 // CHECK13-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK13-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.19, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.20, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK13-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.24, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.25, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK13-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 // CHECK13-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]] // CHECK13: omp_offload.failed5: @@ -9043,7 +9004,7 @@ // CHECK13-NEXT: [[TMP31:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0 // CHECK13-NEXT: [[TMP32:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0 // CHECK13-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK13-NEXT: [[TMP33:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l120.region_id, i32 2, i8** [[TMP31]], i8** [[TMP32]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.23, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.24, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK13-NEXT: [[TMP33:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l120.region_id, i32 2, i8** [[TMP31]], i8** [[TMP32]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.28, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.29, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK13-NEXT: [[TMP34:%.*]] = icmp ne i32 [[TMP33]], 0 // CHECK13-NEXT: br i1 [[TMP34]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]] // CHECK13: omp_offload.failed11: @@ -9061,7 +9022,7 @@ // CHECK13-NEXT: [[TMP40:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 0 // CHECK13-NEXT: [[TMP41:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 0 // CHECK13-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// 
CHECK13-NEXT: [[TMP42:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l124.region_id, i32 1, i8** [[TMP40]], i8** [[TMP41]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.27, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.28, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK13-NEXT: [[TMP42:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l124.region_id, i32 1, i8** [[TMP40]], i8** [[TMP41]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.32, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.33, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK13-NEXT: [[TMP43:%.*]] = icmp ne i32 [[TMP42]], 0 // CHECK13-NEXT: br i1 [[TMP43]], label [[OMP_OFFLOAD_FAILED17:%.*]], label [[OMP_OFFLOAD_CONT18:%.*]] // CHECK13: omp_offload.failed17: @@ -9093,7 +9054,7 @@ // CHECK13-NEXT: [[TMP57:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 0 // CHECK13-NEXT: [[TMP58:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 0 // CHECK13-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK13-NEXT: [[TMP59:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l128.region_id, i32 2, i8** [[TMP57]], i8** [[TMP58]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.31, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.32, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK13-NEXT: [[TMP59:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l128.region_id, i32 2, i8** [[TMP57]], i8** [[TMP58]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.36, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.37, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK13-NEXT: [[TMP60:%.*]] = icmp ne i32 [[TMP59]], 0 // CHECK13-NEXT: br i1 [[TMP60]], label [[OMP_OFFLOAD_FAILED26:%.*]], label [[OMP_OFFLOAD_CONT27:%.*]] // CHECK13: omp_offload.failed26: @@ -9109,11 +9070,11 @@ // CHECK13-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK13-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 // CHECK13-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 -// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK13-NEXT: ret void // // -// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..14 +// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..18 // CHECK13-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK13-NEXT: entry: // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -9161,7 +9122,7 @@ // CHECK13-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 // CHECK13-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 // CHECK13-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]) +// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..19 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]) // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK13: omp.inner.for.inc: // CHECK13-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -9176,7 +9137,7 @@ // CHECK13-NEXT: ret void // // -// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..15 +// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..19 // CHECK13-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK13-NEXT: entry: // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -9259,11 +9220,11 @@ // CHECK13-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK13-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 // CHECK13-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 -// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..17 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..22 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK13-NEXT: ret void // // -// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..17 +// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..22 // CHECK13-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK13-NEXT: entry: // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -9311,7 +9272,7 @@ // CHECK13-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 // CHECK13-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 // CHECK13-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]) +// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..23 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]) // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK13: omp.inner.for.inc: // CHECK13-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -9326,7 +9287,7 @@ // CHECK13-NEXT: ret void // // -// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..18 +// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..23 // CHECK13-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK13-NEXT: entry: // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -9417,11 +9378,11 @@ // CHECK13-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK13-NEXT: store i32 [[TMP1]], i32* [[CONV1]], align 4 // CHECK13-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..21 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP2]]) +// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..26 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP2]]) // CHECK13-NEXT: ret void // // -// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..21 +// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..26 // CHECK13-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK13-NEXT: entry: // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -9477,7 +9438,7 @@ // CHECK13-NEXT: [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK13-NEXT: store i32 [[TMP12]], i32* [[CONV2]], align 4 // CHECK13-NEXT: [[TMP13:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..22 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]]) +// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..27 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]]) // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK13: omp.inner.for.inc: // CHECK13-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -9492,7 +9453,7 @@ // CHECK13-NEXT: ret void // // -// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..22 +// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..27 // CHECK13-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK13-NEXT: entry: // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -9600,11 +9561,11 @@ // CHECK13-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK13-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 // CHECK13-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 -// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..25 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..30 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK13-NEXT: ret void // // -// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..25 +// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..30 // CHECK13-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK13-NEXT: entry: // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -9652,7 +9613,7 @@ // CHECK13-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 // CHECK13-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 // CHECK13-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..26 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]) +// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..31 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]) // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK13: omp.inner.for.inc: // CHECK13-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -9667,7 +9628,7 @@ // CHECK13-NEXT: ret void // // -// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..26 +// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..31 // CHECK13-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK13-NEXT: entry: // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -9756,11 +9717,11 @@ // CHECK13-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK13-NEXT: store i32 [[TMP1]], i32* [[CONV1]], align 4 // CHECK13-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..29 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP2]]) +// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..34 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP2]]) // CHECK13-NEXT: ret void // // -// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..29 +// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..34 // CHECK13-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK13-NEXT: entry: // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -9816,7 +9777,7 @@ // CHECK13-NEXT: [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK13-NEXT: store i32 [[TMP12]], i32* [[CONV2]], align 4 // CHECK13-NEXT: [[TMP13:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..30 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]]) +// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..35 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]]) // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK13: omp.inner.for.inc: // CHECK13-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -9831,7 +9792,7 @@ // CHECK13-NEXT: ret void // // -// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..30 +// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..35 // CHECK13-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK13-NEXT: entry: // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -9931,7 +9892,6 @@ // CHECK14-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK14-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK14-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8 -// CHECK14-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 8 // CHECK14-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK14-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK14-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -9939,38 +9899,34 @@ // CHECK14-NEXT: [[DOTOFFLOAD_BASEPTRS5:%.*]] = alloca [3 x i8*], align 8 // CHECK14-NEXT: [[DOTOFFLOAD_PTRS6:%.*]] = alloca [3 x i8*], align 8 // CHECK14-NEXT: [[DOTOFFLOAD_MAPPERS7:%.*]] = alloca [3 x i8*], align 8 -// CHECK14-NEXT: [[DOTOFFLOAD_SIZES8:%.*]] = alloca [3 x i64], align 8 -// CHECK14-NEXT: [[_TMP9:%.*]] = alloca i32, align 4 +// CHECK14-NEXT: [[_TMP8:%.*]] = alloca i32, align 4 +// CHECK14-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4 // CHECK14-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4 -// CHECK14-NEXT: [[DOTCAPTURE_EXPR_11:%.*]] = alloca i32, align 4 -// CHECK14-NEXT: [[DOTCAPTURE_EXPR_18:%.*]] = alloca i32, align 4 -// CHECK14-NEXT: [[N_CASTED19:%.*]] = alloca i64, align 8 +// CHECK14-NEXT: 
[[DOTCAPTURE_EXPR_17:%.*]] = alloca i32, align 4 +// CHECK14-NEXT: [[N_CASTED18:%.*]] = alloca i64, align 8 // CHECK14-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8 -// CHECK14-NEXT: [[DOTOFFLOAD_BASEPTRS22:%.*]] = alloca [4 x i8*], align 8 -// CHECK14-NEXT: [[DOTOFFLOAD_PTRS23:%.*]] = alloca [4 x i8*], align 8 -// CHECK14-NEXT: [[DOTOFFLOAD_MAPPERS24:%.*]] = alloca [4 x i8*], align 8 -// CHECK14-NEXT: [[DOTOFFLOAD_SIZES25:%.*]] = alloca [4 x i64], align 8 -// CHECK14-NEXT: [[_TMP26:%.*]] = alloca i32, align 4 -// CHECK14-NEXT: [[DOTCAPTURE_EXPR_27:%.*]] = alloca i32, align 4 -// CHECK14-NEXT: [[DOTCAPTURE_EXPR_28:%.*]] = alloca i32, align 4 -// CHECK14-NEXT: [[N_CASTED35:%.*]] = alloca i64, align 8 -// CHECK14-NEXT: [[DOTOFFLOAD_BASEPTRS37:%.*]] = alloca [3 x i8*], align 8 -// CHECK14-NEXT: [[DOTOFFLOAD_PTRS38:%.*]] = alloca [3 x i8*], align 8 -// CHECK14-NEXT: [[DOTOFFLOAD_MAPPERS39:%.*]] = alloca [3 x i8*], align 8 -// CHECK14-NEXT: [[DOTOFFLOAD_SIZES40:%.*]] = alloca [3 x i64], align 8 -// CHECK14-NEXT: [[_TMP41:%.*]] = alloca i32, align 4 -// CHECK14-NEXT: [[DOTCAPTURE_EXPR_42:%.*]] = alloca i32, align 4 -// CHECK14-NEXT: [[DOTCAPTURE_EXPR_43:%.*]] = alloca i32, align 4 -// CHECK14-NEXT: [[DOTCAPTURE_EXPR_50:%.*]] = alloca i32, align 4 -// CHECK14-NEXT: [[N_CASTED51:%.*]] = alloca i64, align 8 -// CHECK14-NEXT: [[DOTCAPTURE_EXPR__CASTED53:%.*]] = alloca i64, align 8 -// CHECK14-NEXT: [[DOTOFFLOAD_BASEPTRS55:%.*]] = alloca [4 x i8*], align 8 -// CHECK14-NEXT: [[DOTOFFLOAD_PTRS56:%.*]] = alloca [4 x i8*], align 8 -// CHECK14-NEXT: [[DOTOFFLOAD_MAPPERS57:%.*]] = alloca [4 x i8*], align 8 -// CHECK14-NEXT: [[DOTOFFLOAD_SIZES58:%.*]] = alloca [4 x i64], align 8 -// CHECK14-NEXT: [[_TMP59:%.*]] = alloca i32, align 4 -// CHECK14-NEXT: [[DOTCAPTURE_EXPR_60:%.*]] = alloca i32, align 4 -// CHECK14-NEXT: [[DOTCAPTURE_EXPR_61:%.*]] = alloca i32, align 4 +// CHECK14-NEXT: [[DOTOFFLOAD_BASEPTRS21:%.*]] = alloca [4 x i8*], align 8 +// CHECK14-NEXT: [[DOTOFFLOAD_PTRS22:%.*]] = alloca [4 x i8*], align 8 +// CHECK14-NEXT: [[DOTOFFLOAD_MAPPERS23:%.*]] = alloca [4 x i8*], align 8 +// CHECK14-NEXT: [[_TMP24:%.*]] = alloca i32, align 4 +// CHECK14-NEXT: [[DOTCAPTURE_EXPR_25:%.*]] = alloca i32, align 4 +// CHECK14-NEXT: [[DOTCAPTURE_EXPR_26:%.*]] = alloca i32, align 4 +// CHECK14-NEXT: [[N_CASTED33:%.*]] = alloca i64, align 8 +// CHECK14-NEXT: [[DOTOFFLOAD_BASEPTRS35:%.*]] = alloca [3 x i8*], align 8 +// CHECK14-NEXT: [[DOTOFFLOAD_PTRS36:%.*]] = alloca [3 x i8*], align 8 +// CHECK14-NEXT: [[DOTOFFLOAD_MAPPERS37:%.*]] = alloca [3 x i8*], align 8 +// CHECK14-NEXT: [[_TMP38:%.*]] = alloca i32, align 4 +// CHECK14-NEXT: [[DOTCAPTURE_EXPR_39:%.*]] = alloca i32, align 4 +// CHECK14-NEXT: [[DOTCAPTURE_EXPR_40:%.*]] = alloca i32, align 4 +// CHECK14-NEXT: [[DOTCAPTURE_EXPR_47:%.*]] = alloca i32, align 4 +// CHECK14-NEXT: [[N_CASTED48:%.*]] = alloca i64, align 8 +// CHECK14-NEXT: [[DOTCAPTURE_EXPR__CASTED50:%.*]] = alloca i64, align 8 +// CHECK14-NEXT: [[DOTOFFLOAD_BASEPTRS52:%.*]] = alloca [4 x i8*], align 8 +// CHECK14-NEXT: [[DOTOFFLOAD_PTRS53:%.*]] = alloca [4 x i8*], align 8 +// CHECK14-NEXT: [[DOTOFFLOAD_MAPPERS54:%.*]] = alloca [4 x i8*], align 8 +// CHECK14-NEXT: [[_TMP55:%.*]] = alloca i32, align 4 +// CHECK14-NEXT: [[DOTCAPTURE_EXPR_56:%.*]] = alloca i32, align 4 +// CHECK14-NEXT: [[DOTCAPTURE_EXPR_57:%.*]] = alloca i32, align 4 // CHECK14-NEXT: store i32 0, i32* [[RETVAL]], align 4 // CHECK14-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 // CHECK14-NEXT: store i8** [[ARGV]], i8*** 
[[ARGV_ADDR]], align 8 @@ -9993,314 +9949,280 @@ // CHECK14-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK14-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i64* // CHECK14-NEXT: store i64 [[TMP4]], i64* [[TMP9]], align 8 -// CHECK14-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK14-NEXT: store i64 4, i64* [[TMP10]], align 8 -// CHECK14-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK14-NEXT: store i8* null, i8** [[TMP11]], align 8 -// CHECK14-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK14-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64* -// CHECK14-NEXT: store i64 [[TMP1]], i64* [[TMP13]], align 8 -// CHECK14-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK14-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64* -// CHECK14-NEXT: store i64 [[TMP1]], i64* [[TMP15]], align 8 -// CHECK14-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK14-NEXT: store i64 8, i64* [[TMP16]], align 8 -// CHECK14-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK14-NEXT: store i8* null, i8** [[TMP17]], align 8 -// CHECK14-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK14-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK14-NEXT: store i8* null, i8** [[TMP10]], align 8 +// CHECK14-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK14-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i64* +// CHECK14-NEXT: store i64 [[TMP1]], i64* [[TMP12]], align 8 +// CHECK14-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK14-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i64* +// CHECK14-NEXT: store i64 [[TMP1]], i64* [[TMP14]], align 8 +// CHECK14-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK14-NEXT: store i8* null, i8** [[TMP15]], align 8 +// CHECK14-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK14-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** +// CHECK14-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 8 +// CHECK14-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK14-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** // CHECK14-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 8 -// CHECK14-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK14-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** -// CHECK14-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 8 -// CHECK14-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK14-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 8 -// CHECK14-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK14-NEXT: store i8* null, i8** [[TMP23]], align 8 -// CHECK14-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x 
i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK14-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK14-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK14-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4 -// CHECK14-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK14-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK14-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 +// CHECK14-NEXT: store i64 [[TMP5]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 2), align 8 +// CHECK14-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK14-NEXT: store i8* null, i8** [[TMP20]], align 8 +// CHECK14-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK14-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK14-NEXT: [[TMP23:%.*]] = load i32, i32* [[N]], align 4 +// CHECK14-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK14-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK14-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK14-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK14-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK14-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK14-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK14-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1 -// CHECK14-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 -// CHECK14-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP30]]) -// CHECK14-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK14-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 -// CHECK14-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK14-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK14-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], 1 +// CHECK14-NEXT: [[TMP26:%.*]] = zext i32 [[ADD]] to i64 +// CHECK14-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP26]]) +// CHECK14-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK14-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK14-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK14: omp_offload.failed: // CHECK14-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139(i64 [[TMP4]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK14-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK14: omp_offload.cont: -// CHECK14-NEXT: [[TMP33:%.*]] = load i32, i32* 
[[N]], align 4 +// CHECK14-NEXT: [[TMP29:%.*]] = load i32, i32* [[N]], align 4 // CHECK14-NEXT: [[CONV4:%.*]] = bitcast i64* [[N_CASTED3]] to i32* -// CHECK14-NEXT: store i32 [[TMP33]], i32* [[CONV4]], align 4 -// CHECK14-NEXT: [[TMP34:%.*]] = load i64, i64* [[N_CASTED3]], align 8 -// CHECK14-NEXT: [[TMP35:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK14-NEXT: [[TMP36:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 -// CHECK14-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i64* -// CHECK14-NEXT: store i64 [[TMP34]], i64* [[TMP37]], align 8 -// CHECK14-NEXT: [[TMP38:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 -// CHECK14-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i64* -// CHECK14-NEXT: store i64 [[TMP34]], i64* [[TMP39]], align 8 -// CHECK14-NEXT: [[TMP40:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 0 -// CHECK14-NEXT: store i64 4, i64* [[TMP40]], align 8 -// CHECK14-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 0 +// CHECK14-NEXT: store i32 [[TMP29]], i32* [[CONV4]], align 4 +// CHECK14-NEXT: [[TMP30:%.*]] = load i64, i64* [[N_CASTED3]], align 8 +// CHECK14-NEXT: [[TMP31:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK14-NEXT: [[TMP32:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 +// CHECK14-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i64* +// CHECK14-NEXT: store i64 [[TMP30]], i64* [[TMP33]], align 8 +// CHECK14-NEXT: [[TMP34:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 +// CHECK14-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i64* +// CHECK14-NEXT: store i64 [[TMP30]], i64* [[TMP35]], align 8 +// CHECK14-NEXT: [[TMP36:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 0 +// CHECK14-NEXT: store i8* null, i8** [[TMP36]], align 8 +// CHECK14-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1 +// CHECK14-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i64* +// CHECK14-NEXT: store i64 [[TMP1]], i64* [[TMP38]], align 8 +// CHECK14-NEXT: [[TMP39:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 1 +// CHECK14-NEXT: [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i64* +// CHECK14-NEXT: store i64 [[TMP1]], i64* [[TMP40]], align 8 +// CHECK14-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 1 // CHECK14-NEXT: store i8* null, i8** [[TMP41]], align 8 -// CHECK14-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1 -// CHECK14-NEXT: [[TMP43:%.*]] = bitcast i8** [[TMP42]] to i64* -// CHECK14-NEXT: store i64 [[TMP1]], i64* [[TMP43]], align 8 -// CHECK14-NEXT: [[TMP44:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 1 -// CHECK14-NEXT: [[TMP45:%.*]] = bitcast i8** [[TMP44]] to i64* -// CHECK14-NEXT: store i64 [[TMP1]], i64* [[TMP45]], align 8 -// CHECK14-NEXT: [[TMP46:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 1 -// CHECK14-NEXT: store i64 8, i64* [[TMP46]], align 8 -// CHECK14-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 1 -// CHECK14-NEXT: store i8* null, i8** [[TMP47]], align 8 -// CHECK14-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* 
[[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 2 -// CHECK14-NEXT: [[TMP49:%.*]] = bitcast i8** [[TMP48]] to i32** -// CHECK14-NEXT: store i32* [[VLA]], i32** [[TMP49]], align 8 -// CHECK14-NEXT: [[TMP50:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 2 -// CHECK14-NEXT: [[TMP51:%.*]] = bitcast i8** [[TMP50]] to i32** -// CHECK14-NEXT: store i32* [[VLA]], i32** [[TMP51]], align 8 -// CHECK14-NEXT: [[TMP52:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 2 -// CHECK14-NEXT: store i64 [[TMP35]], i64* [[TMP52]], align 8 -// CHECK14-NEXT: [[TMP53:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 2 -// CHECK14-NEXT: store i8* null, i8** [[TMP53]], align 8 -// CHECK14-NEXT: [[TMP54:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 -// CHECK14-NEXT: [[TMP55:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 -// CHECK14-NEXT: [[TMP56:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 0 -// CHECK14-NEXT: [[TMP57:%.*]] = load i32, i32* [[N]], align 4 -// CHECK14-NEXT: store i32 [[TMP57]], i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK14-NEXT: [[TMP58:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK14-NEXT: [[SUB12:%.*]] = sub nsw i32 [[TMP58]], 0 -// CHECK14-NEXT: [[DIV13:%.*]] = sdiv i32 [[SUB12]], 1 -// CHECK14-NEXT: [[SUB14:%.*]] = sub nsw i32 [[DIV13]], 1 -// CHECK14-NEXT: store i32 [[SUB14]], i32* [[DOTCAPTURE_EXPR_11]], align 4 -// CHECK14-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_11]], align 4 -// CHECK14-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP59]], 1 -// CHECK14-NEXT: [[TMP60:%.*]] = zext i32 [[ADD15]] to i64 -// CHECK14-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP60]]) -// CHECK14-NEXT: [[TMP61:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143.region_id, i32 3, i8** [[TMP54]], i8** [[TMP55]], i64* [[TMP56]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK14-NEXT: [[TMP62:%.*]] = icmp ne i32 [[TMP61]], 0 -// CHECK14-NEXT: br i1 [[TMP62]], label [[OMP_OFFLOAD_FAILED16:%.*]], label [[OMP_OFFLOAD_CONT17:%.*]] -// CHECK14: omp_offload.failed16: -// CHECK14-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143(i64 [[TMP34]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] -// CHECK14-NEXT: br label [[OMP_OFFLOAD_CONT17]] -// CHECK14: omp_offload.cont17: -// CHECK14-NEXT: [[TMP63:%.*]] = load i32, i32* [[M]], align 4 -// CHECK14-NEXT: store i32 [[TMP63]], i32* [[DOTCAPTURE_EXPR_18]], align 4 -// CHECK14-NEXT: [[TMP64:%.*]] = load i32, i32* [[N]], align 4 -// CHECK14-NEXT: [[CONV20:%.*]] = bitcast i64* [[N_CASTED19]] to i32* -// CHECK14-NEXT: store i32 [[TMP64]], i32* [[CONV20]], align 4 -// CHECK14-NEXT: [[TMP65:%.*]] = load i64, i64* [[N_CASTED19]], align 8 -// CHECK14-NEXT: [[TMP66:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_18]], align 4 -// CHECK14-NEXT: [[CONV21:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* -// CHECK14-NEXT: store i32 [[TMP66]], i32* [[CONV21]], align 4 -// CHECK14-NEXT: [[TMP67:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK14-NEXT: [[TMP68:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK14-NEXT: [[TMP69:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* 
[[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 0 -// CHECK14-NEXT: [[TMP70:%.*]] = bitcast i8** [[TMP69]] to i64* -// CHECK14-NEXT: store i64 [[TMP65]], i64* [[TMP70]], align 8 -// CHECK14-NEXT: [[TMP71:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 0 -// CHECK14-NEXT: [[TMP72:%.*]] = bitcast i8** [[TMP71]] to i64* -// CHECK14-NEXT: store i64 [[TMP65]], i64* [[TMP72]], align 8 -// CHECK14-NEXT: [[TMP73:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 0 -// CHECK14-NEXT: store i64 4, i64* [[TMP73]], align 8 -// CHECK14-NEXT: [[TMP74:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 0 -// CHECK14-NEXT: store i8* null, i8** [[TMP74]], align 8 -// CHECK14-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 1 -// CHECK14-NEXT: [[TMP76:%.*]] = bitcast i8** [[TMP75]] to i64* -// CHECK14-NEXT: store i64 [[TMP1]], i64* [[TMP76]], align 8 -// CHECK14-NEXT: [[TMP77:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 1 -// CHECK14-NEXT: [[TMP78:%.*]] = bitcast i8** [[TMP77]] to i64* -// CHECK14-NEXT: store i64 [[TMP1]], i64* [[TMP78]], align 8 -// CHECK14-NEXT: [[TMP79:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 1 -// CHECK14-NEXT: store i64 8, i64* [[TMP79]], align 8 -// CHECK14-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 1 +// CHECK14-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 2 +// CHECK14-NEXT: [[TMP43:%.*]] = bitcast i8** [[TMP42]] to i32** +// CHECK14-NEXT: store i32* [[VLA]], i32** [[TMP43]], align 8 +// CHECK14-NEXT: [[TMP44:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 2 +// CHECK14-NEXT: [[TMP45:%.*]] = bitcast i8** [[TMP44]] to i32** +// CHECK14-NEXT: store i32* [[VLA]], i32** [[TMP45]], align 8 +// CHECK14-NEXT: store i64 [[TMP31]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 2), align 8 +// CHECK14-NEXT: [[TMP46:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 2 +// CHECK14-NEXT: store i8* null, i8** [[TMP46]], align 8 +// CHECK14-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 +// CHECK14-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 +// CHECK14-NEXT: [[TMP49:%.*]] = load i32, i32* [[N]], align 4 +// CHECK14-NEXT: store i32 [[TMP49]], i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK14-NEXT: [[TMP50:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK14-NEXT: [[SUB11:%.*]] = sub nsw i32 [[TMP50]], 0 +// CHECK14-NEXT: [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1 +// CHECK14-NEXT: [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1 +// CHECK14-NEXT: store i32 [[SUB13]], i32* [[DOTCAPTURE_EXPR_10]], align 4 +// CHECK14-NEXT: [[TMP51:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 +// CHECK14-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP51]], 1 +// CHECK14-NEXT: [[TMP52:%.*]] = zext i32 [[ADD14]] to i64 +// CHECK14-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP52]]) +// CHECK14-NEXT: [[TMP53:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143.region_id, i32 3, i8** [[TMP47]], i8** 
[[TMP48]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK14-NEXT: [[TMP54:%.*]] = icmp ne i32 [[TMP53]], 0 +// CHECK14-NEXT: br i1 [[TMP54]], label [[OMP_OFFLOAD_FAILED15:%.*]], label [[OMP_OFFLOAD_CONT16:%.*]] +// CHECK14: omp_offload.failed15: +// CHECK14-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143(i64 [[TMP30]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] +// CHECK14-NEXT: br label [[OMP_OFFLOAD_CONT16]] +// CHECK14: omp_offload.cont16: +// CHECK14-NEXT: [[TMP55:%.*]] = load i32, i32* [[M]], align 4 +// CHECK14-NEXT: store i32 [[TMP55]], i32* [[DOTCAPTURE_EXPR_17]], align 4 +// CHECK14-NEXT: [[TMP56:%.*]] = load i32, i32* [[N]], align 4 +// CHECK14-NEXT: [[CONV19:%.*]] = bitcast i64* [[N_CASTED18]] to i32* +// CHECK14-NEXT: store i32 [[TMP56]], i32* [[CONV19]], align 4 +// CHECK14-NEXT: [[TMP57:%.*]] = load i64, i64* [[N_CASTED18]], align 8 +// CHECK14-NEXT: [[TMP58:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_17]], align 4 +// CHECK14-NEXT: [[CONV20:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* +// CHECK14-NEXT: store i32 [[TMP58]], i32* [[CONV20]], align 4 +// CHECK14-NEXT: [[TMP59:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 +// CHECK14-NEXT: [[TMP60:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK14-NEXT: [[TMP61:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0 +// CHECK14-NEXT: [[TMP62:%.*]] = bitcast i8** [[TMP61]] to i64* +// CHECK14-NEXT: store i64 [[TMP57]], i64* [[TMP62]], align 8 +// CHECK14-NEXT: [[TMP63:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0 +// CHECK14-NEXT: [[TMP64:%.*]] = bitcast i8** [[TMP63]] to i64* +// CHECK14-NEXT: store i64 [[TMP57]], i64* [[TMP64]], align 8 +// CHECK14-NEXT: [[TMP65:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 0 +// CHECK14-NEXT: store i8* null, i8** [[TMP65]], align 8 +// CHECK14-NEXT: [[TMP66:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 1 +// CHECK14-NEXT: [[TMP67:%.*]] = bitcast i8** [[TMP66]] to i64* +// CHECK14-NEXT: store i64 [[TMP1]], i64* [[TMP67]], align 8 +// CHECK14-NEXT: [[TMP68:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 1 +// CHECK14-NEXT: [[TMP69:%.*]] = bitcast i8** [[TMP68]] to i64* +// CHECK14-NEXT: store i64 [[TMP1]], i64* [[TMP69]], align 8 +// CHECK14-NEXT: [[TMP70:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 1 +// CHECK14-NEXT: store i8* null, i8** [[TMP70]], align 8 +// CHECK14-NEXT: [[TMP71:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 2 +// CHECK14-NEXT: [[TMP72:%.*]] = bitcast i8** [[TMP71]] to i32** +// CHECK14-NEXT: store i32* [[VLA]], i32** [[TMP72]], align 8 +// CHECK14-NEXT: [[TMP73:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 2 +// CHECK14-NEXT: [[TMP74:%.*]] = bitcast i8** [[TMP73]] to i32** +// CHECK14-NEXT: store i32* [[VLA]], i32** [[TMP74]], align 8 +// CHECK14-NEXT: store i64 [[TMP60]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 2), align 8 +// CHECK14-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 2 +// CHECK14-NEXT: store i8* null, i8** [[TMP75]], align 8 +// 
CHECK14-NEXT: [[TMP76:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 3 +// CHECK14-NEXT: [[TMP77:%.*]] = bitcast i8** [[TMP76]] to i64* +// CHECK14-NEXT: store i64 [[TMP59]], i64* [[TMP77]], align 8 +// CHECK14-NEXT: [[TMP78:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 3 +// CHECK14-NEXT: [[TMP79:%.*]] = bitcast i8** [[TMP78]] to i64* +// CHECK14-NEXT: store i64 [[TMP59]], i64* [[TMP79]], align 8 +// CHECK14-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 3 // CHECK14-NEXT: store i8* null, i8** [[TMP80]], align 8 -// CHECK14-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 2 -// CHECK14-NEXT: [[TMP82:%.*]] = bitcast i8** [[TMP81]] to i32** -// CHECK14-NEXT: store i32* [[VLA]], i32** [[TMP82]], align 8 -// CHECK14-NEXT: [[TMP83:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 2 -// CHECK14-NEXT: [[TMP84:%.*]] = bitcast i8** [[TMP83]] to i32** -// CHECK14-NEXT: store i32* [[VLA]], i32** [[TMP84]], align 8 -// CHECK14-NEXT: [[TMP85:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 2 -// CHECK14-NEXT: store i64 [[TMP68]], i64* [[TMP85]], align 8 -// CHECK14-NEXT: [[TMP86:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 2 -// CHECK14-NEXT: store i8* null, i8** [[TMP86]], align 8 -// CHECK14-NEXT: [[TMP87:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 3 -// CHECK14-NEXT: [[TMP88:%.*]] = bitcast i8** [[TMP87]] to i64* -// CHECK14-NEXT: store i64 [[TMP67]], i64* [[TMP88]], align 8 -// CHECK14-NEXT: [[TMP89:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 3 -// CHECK14-NEXT: [[TMP90:%.*]] = bitcast i8** [[TMP89]] to i64* -// CHECK14-NEXT: store i64 [[TMP67]], i64* [[TMP90]], align 8 -// CHECK14-NEXT: [[TMP91:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 3 -// CHECK14-NEXT: store i64 4, i64* [[TMP91]], align 8 -// CHECK14-NEXT: [[TMP92:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 3 -// CHECK14-NEXT: store i8* null, i8** [[TMP92]], align 8 -// CHECK14-NEXT: [[TMP93:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 0 -// CHECK14-NEXT: [[TMP94:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 0 -// CHECK14-NEXT: [[TMP95:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 0 -// CHECK14-NEXT: [[TMP96:%.*]] = load i32, i32* [[N]], align 4 -// CHECK14-NEXT: store i32 [[TMP96]], i32* [[DOTCAPTURE_EXPR_27]], align 4 -// CHECK14-NEXT: [[TMP97:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_27]], align 4 -// CHECK14-NEXT: [[SUB29:%.*]] = sub nsw i32 [[TMP97]], 0 -// CHECK14-NEXT: [[DIV30:%.*]] = sdiv i32 [[SUB29]], 1 -// CHECK14-NEXT: [[SUB31:%.*]] = sub nsw i32 [[DIV30]], 1 -// CHECK14-NEXT: store i32 [[SUB31]], i32* [[DOTCAPTURE_EXPR_28]], align 4 -// CHECK14-NEXT: [[TMP98:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_28]], align 4 -// CHECK14-NEXT: [[ADD32:%.*]] = add nsw i32 [[TMP98]], 1 -// CHECK14-NEXT: [[TMP99:%.*]] = zext i32 [[ADD32]] to i64 -// CHECK14-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP99]]) -// CHECK14-NEXT: [[TMP100:%.*]] = call i32 
@__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147.region_id, i32 4, i8** [[TMP93]], i8** [[TMP94]], i64* [[TMP95]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.7, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK14-NEXT: [[TMP101:%.*]] = icmp ne i32 [[TMP100]], 0 -// CHECK14-NEXT: br i1 [[TMP101]], label [[OMP_OFFLOAD_FAILED33:%.*]], label [[OMP_OFFLOAD_CONT34:%.*]] -// CHECK14: omp_offload.failed33: -// CHECK14-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147(i64 [[TMP65]], i64 [[TMP1]], i32* [[VLA]], i64 [[TMP67]]) #[[ATTR3]] -// CHECK14-NEXT: br label [[OMP_OFFLOAD_CONT34]] -// CHECK14: omp_offload.cont34: -// CHECK14-NEXT: [[TMP102:%.*]] = load i32, i32* [[N]], align 4 -// CHECK14-NEXT: [[CONV36:%.*]] = bitcast i64* [[N_CASTED35]] to i32* -// CHECK14-NEXT: store i32 [[TMP102]], i32* [[CONV36]], align 4 -// CHECK14-NEXT: [[TMP103:%.*]] = load i64, i64* [[N_CASTED35]], align 8 -// CHECK14-NEXT: [[TMP104:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK14-NEXT: [[TMP105:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS37]], i32 0, i32 0 -// CHECK14-NEXT: [[TMP106:%.*]] = bitcast i8** [[TMP105]] to i64* -// CHECK14-NEXT: store i64 [[TMP103]], i64* [[TMP106]], align 8 -// CHECK14-NEXT: [[TMP107:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS38]], i32 0, i32 0 -// CHECK14-NEXT: [[TMP108:%.*]] = bitcast i8** [[TMP107]] to i64* -// CHECK14-NEXT: store i64 [[TMP103]], i64* [[TMP108]], align 8 -// CHECK14-NEXT: [[TMP109:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES40]], i32 0, i32 0 -// CHECK14-NEXT: store i64 4, i64* [[TMP109]], align 8 -// CHECK14-NEXT: [[TMP110:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS39]], i64 0, i64 0 -// CHECK14-NEXT: store i8* null, i8** [[TMP110]], align 8 -// CHECK14-NEXT: [[TMP111:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS37]], i32 0, i32 1 -// CHECK14-NEXT: [[TMP112:%.*]] = bitcast i8** [[TMP111]] to i64* -// CHECK14-NEXT: store i64 [[TMP1]], i64* [[TMP112]], align 8 -// CHECK14-NEXT: [[TMP113:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS38]], i32 0, i32 1 -// CHECK14-NEXT: [[TMP114:%.*]] = bitcast i8** [[TMP113]] to i64* -// CHECK14-NEXT: store i64 [[TMP1]], i64* [[TMP114]], align 8 -// CHECK14-NEXT: [[TMP115:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES40]], i32 0, i32 1 -// CHECK14-NEXT: store i64 8, i64* [[TMP115]], align 8 -// CHECK14-NEXT: [[TMP116:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS39]], i64 0, i64 1 -// CHECK14-NEXT: store i8* null, i8** [[TMP116]], align 8 -// CHECK14-NEXT: [[TMP117:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS37]], i32 0, i32 2 -// CHECK14-NEXT: [[TMP118:%.*]] = bitcast i8** [[TMP117]] to i32** -// CHECK14-NEXT: store i32* [[VLA]], i32** [[TMP118]], align 8 -// CHECK14-NEXT: [[TMP119:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS38]], i32 0, i32 2 -// CHECK14-NEXT: [[TMP120:%.*]] = bitcast i8** [[TMP119]] to i32** -// CHECK14-NEXT: store i32* [[VLA]], i32** [[TMP120]], align 8 -// CHECK14-NEXT: [[TMP121:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES40]], i32 0, i32 2 -// CHECK14-NEXT: store i64 [[TMP104]], i64* [[TMP121]], align 8 -// CHECK14-NEXT: [[TMP122:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* 
[[DOTOFFLOAD_MAPPERS39]], i64 0, i64 2 -// CHECK14-NEXT: store i8* null, i8** [[TMP122]], align 8 -// CHECK14-NEXT: [[TMP123:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS37]], i32 0, i32 0 -// CHECK14-NEXT: [[TMP124:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS38]], i32 0, i32 0 -// CHECK14-NEXT: [[TMP125:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES40]], i32 0, i32 0 -// CHECK14-NEXT: [[TMP126:%.*]] = load i32, i32* [[N]], align 4 -// CHECK14-NEXT: store i32 [[TMP126]], i32* [[DOTCAPTURE_EXPR_42]], align 4 -// CHECK14-NEXT: [[TMP127:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_42]], align 4 -// CHECK14-NEXT: [[SUB44:%.*]] = sub nsw i32 [[TMP127]], 0 -// CHECK14-NEXT: [[DIV45:%.*]] = sdiv i32 [[SUB44]], 1 -// CHECK14-NEXT: [[SUB46:%.*]] = sub nsw i32 [[DIV45]], 1 -// CHECK14-NEXT: store i32 [[SUB46]], i32* [[DOTCAPTURE_EXPR_43]], align 4 -// CHECK14-NEXT: [[TMP128:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_43]], align 4 -// CHECK14-NEXT: [[ADD47:%.*]] = add nsw i32 [[TMP128]], 1 -// CHECK14-NEXT: [[TMP129:%.*]] = zext i32 [[ADD47]] to i64 -// CHECK14-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP129]]) -// CHECK14-NEXT: [[TMP130:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151.region_id, i32 3, i8** [[TMP123]], i8** [[TMP124]], i64* [[TMP125]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK14-NEXT: [[TMP131:%.*]] = icmp ne i32 [[TMP130]], 0 -// CHECK14-NEXT: br i1 [[TMP131]], label [[OMP_OFFLOAD_FAILED48:%.*]], label [[OMP_OFFLOAD_CONT49:%.*]] -// CHECK14: omp_offload.failed48: -// CHECK14-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151(i64 [[TMP103]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] -// CHECK14-NEXT: br label [[OMP_OFFLOAD_CONT49]] -// CHECK14: omp_offload.cont49: -// CHECK14-NEXT: [[TMP132:%.*]] = load i32, i32* [[M]], align 4 -// CHECK14-NEXT: store i32 [[TMP132]], i32* [[DOTCAPTURE_EXPR_50]], align 4 -// CHECK14-NEXT: [[TMP133:%.*]] = load i32, i32* [[N]], align 4 -// CHECK14-NEXT: [[CONV52:%.*]] = bitcast i64* [[N_CASTED51]] to i32* -// CHECK14-NEXT: store i32 [[TMP133]], i32* [[CONV52]], align 4 -// CHECK14-NEXT: [[TMP134:%.*]] = load i64, i64* [[N_CASTED51]], align 8 -// CHECK14-NEXT: [[TMP135:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_50]], align 4 -// CHECK14-NEXT: [[CONV54:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED53]] to i32* -// CHECK14-NEXT: store i32 [[TMP135]], i32* [[CONV54]], align 4 -// CHECK14-NEXT: [[TMP136:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED53]], align 8 -// CHECK14-NEXT: [[TMP137:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK14-NEXT: [[TMP138:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS55]], i32 0, i32 0 +// CHECK14-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0 +// CHECK14-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0 +// CHECK14-NEXT: [[TMP83:%.*]] = load i32, i32* [[N]], align 4 +// CHECK14-NEXT: store i32 [[TMP83]], i32* [[DOTCAPTURE_EXPR_25]], align 4 +// CHECK14-NEXT: [[TMP84:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_25]], align 4 +// CHECK14-NEXT: [[SUB27:%.*]] = sub nsw i32 [[TMP84]], 0 +// CHECK14-NEXT: [[DIV28:%.*]] = sdiv i32 [[SUB27]], 1 +// CHECK14-NEXT: [[SUB29:%.*]] = sub nsw 
i32 [[DIV28]], 1 +// CHECK14-NEXT: store i32 [[SUB29]], i32* [[DOTCAPTURE_EXPR_26]], align 4 +// CHECK14-NEXT: [[TMP85:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_26]], align 4 +// CHECK14-NEXT: [[ADD30:%.*]] = add nsw i32 [[TMP85]], 1 +// CHECK14-NEXT: [[TMP86:%.*]] = zext i32 [[ADD30]] to i64 +// CHECK14-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP86]]) +// CHECK14-NEXT: [[TMP87:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147.region_id, i32 4, i8** [[TMP81]], i8** [[TMP82]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK14-NEXT: [[TMP88:%.*]] = icmp ne i32 [[TMP87]], 0 +// CHECK14-NEXT: br i1 [[TMP88]], label [[OMP_OFFLOAD_FAILED31:%.*]], label [[OMP_OFFLOAD_CONT32:%.*]] +// CHECK14: omp_offload.failed31: +// CHECK14-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147(i64 [[TMP57]], i64 [[TMP1]], i32* [[VLA]], i64 [[TMP59]]) #[[ATTR3]] +// CHECK14-NEXT: br label [[OMP_OFFLOAD_CONT32]] +// CHECK14: omp_offload.cont32: +// CHECK14-NEXT: [[TMP89:%.*]] = load i32, i32* [[N]], align 4 +// CHECK14-NEXT: [[CONV34:%.*]] = bitcast i64* [[N_CASTED33]] to i32* +// CHECK14-NEXT: store i32 [[TMP89]], i32* [[CONV34]], align 4 +// CHECK14-NEXT: [[TMP90:%.*]] = load i64, i64* [[N_CASTED33]], align 8 +// CHECK14-NEXT: [[TMP91:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK14-NEXT: [[TMP92:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS35]], i32 0, i32 0 +// CHECK14-NEXT: [[TMP93:%.*]] = bitcast i8** [[TMP92]] to i64* +// CHECK14-NEXT: store i64 [[TMP90]], i64* [[TMP93]], align 8 +// CHECK14-NEXT: [[TMP94:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS36]], i32 0, i32 0 +// CHECK14-NEXT: [[TMP95:%.*]] = bitcast i8** [[TMP94]] to i64* +// CHECK14-NEXT: store i64 [[TMP90]], i64* [[TMP95]], align 8 +// CHECK14-NEXT: [[TMP96:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS37]], i64 0, i64 0 +// CHECK14-NEXT: store i8* null, i8** [[TMP96]], align 8 +// CHECK14-NEXT: [[TMP97:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS35]], i32 0, i32 1 +// CHECK14-NEXT: [[TMP98:%.*]] = bitcast i8** [[TMP97]] to i64* +// CHECK14-NEXT: store i64 [[TMP1]], i64* [[TMP98]], align 8 +// CHECK14-NEXT: [[TMP99:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS36]], i32 0, i32 1 +// CHECK14-NEXT: [[TMP100:%.*]] = bitcast i8** [[TMP99]] to i64* +// CHECK14-NEXT: store i64 [[TMP1]], i64* [[TMP100]], align 8 +// CHECK14-NEXT: [[TMP101:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS37]], i64 0, i64 1 +// CHECK14-NEXT: store i8* null, i8** [[TMP101]], align 8 +// CHECK14-NEXT: [[TMP102:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS35]], i32 0, i32 2 +// CHECK14-NEXT: [[TMP103:%.*]] = bitcast i8** [[TMP102]] to i32** +// CHECK14-NEXT: store i32* [[VLA]], i32** [[TMP103]], align 8 +// CHECK14-NEXT: [[TMP104:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS36]], i32 0, i32 2 +// CHECK14-NEXT: [[TMP105:%.*]] = bitcast i8** [[TMP104]] to i32** +// CHECK14-NEXT: store i32* [[VLA]], i32** [[TMP105]], align 8 +// CHECK14-NEXT: store i64 [[TMP91]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.12, i32 0, i32 2), 
align 8 +// CHECK14-NEXT: [[TMP106:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS37]], i64 0, i64 2 +// CHECK14-NEXT: store i8* null, i8** [[TMP106]], align 8 +// CHECK14-NEXT: [[TMP107:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS35]], i32 0, i32 0 +// CHECK14-NEXT: [[TMP108:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS36]], i32 0, i32 0 +// CHECK14-NEXT: [[TMP109:%.*]] = load i32, i32* [[N]], align 4 +// CHECK14-NEXT: store i32 [[TMP109]], i32* [[DOTCAPTURE_EXPR_39]], align 4 +// CHECK14-NEXT: [[TMP110:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_39]], align 4 +// CHECK14-NEXT: [[SUB41:%.*]] = sub nsw i32 [[TMP110]], 0 +// CHECK14-NEXT: [[DIV42:%.*]] = sdiv i32 [[SUB41]], 1 +// CHECK14-NEXT: [[SUB43:%.*]] = sub nsw i32 [[DIV42]], 1 +// CHECK14-NEXT: store i32 [[SUB43]], i32* [[DOTCAPTURE_EXPR_40]], align 4 +// CHECK14-NEXT: [[TMP111:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_40]], align 4 +// CHECK14-NEXT: [[ADD44:%.*]] = add nsw i32 [[TMP111]], 1 +// CHECK14-NEXT: [[TMP112:%.*]] = zext i32 [[ADD44]] to i64 +// CHECK14-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP112]]) +// CHECK14-NEXT: [[TMP113:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151.region_id, i32 3, i8** [[TMP107]], i8** [[TMP108]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK14-NEXT: [[TMP114:%.*]] = icmp ne i32 [[TMP113]], 0 +// CHECK14-NEXT: br i1 [[TMP114]], label [[OMP_OFFLOAD_FAILED45:%.*]], label [[OMP_OFFLOAD_CONT46:%.*]] +// CHECK14: omp_offload.failed45: +// CHECK14-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151(i64 [[TMP90]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] +// CHECK14-NEXT: br label [[OMP_OFFLOAD_CONT46]] +// CHECK14: omp_offload.cont46: +// CHECK14-NEXT: [[TMP115:%.*]] = load i32, i32* [[M]], align 4 +// CHECK14-NEXT: store i32 [[TMP115]], i32* [[DOTCAPTURE_EXPR_47]], align 4 +// CHECK14-NEXT: [[TMP116:%.*]] = load i32, i32* [[N]], align 4 +// CHECK14-NEXT: [[CONV49:%.*]] = bitcast i64* [[N_CASTED48]] to i32* +// CHECK14-NEXT: store i32 [[TMP116]], i32* [[CONV49]], align 4 +// CHECK14-NEXT: [[TMP117:%.*]] = load i64, i64* [[N_CASTED48]], align 8 +// CHECK14-NEXT: [[TMP118:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_47]], align 4 +// CHECK14-NEXT: [[CONV51:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED50]] to i32* +// CHECK14-NEXT: store i32 [[TMP118]], i32* [[CONV51]], align 4 +// CHECK14-NEXT: [[TMP119:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED50]], align 8 +// CHECK14-NEXT: [[TMP120:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK14-NEXT: [[TMP121:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS52]], i32 0, i32 0 +// CHECK14-NEXT: [[TMP122:%.*]] = bitcast i8** [[TMP121]] to i64* +// CHECK14-NEXT: store i64 [[TMP117]], i64* [[TMP122]], align 8 +// CHECK14-NEXT: [[TMP123:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS53]], i32 0, i32 0 +// CHECK14-NEXT: [[TMP124:%.*]] = bitcast i8** [[TMP123]] to i64* +// CHECK14-NEXT: store i64 [[TMP117]], i64* [[TMP124]], align 8 +// CHECK14-NEXT: [[TMP125:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS54]], i64 0, i64 0 +// CHECK14-NEXT: store i8* null, i8** [[TMP125]], align 8 +// 
CHECK14-NEXT: [[TMP126:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS52]], i32 0, i32 1 +// CHECK14-NEXT: [[TMP127:%.*]] = bitcast i8** [[TMP126]] to i64* +// CHECK14-NEXT: store i64 [[TMP1]], i64* [[TMP127]], align 8 +// CHECK14-NEXT: [[TMP128:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS53]], i32 0, i32 1 +// CHECK14-NEXT: [[TMP129:%.*]] = bitcast i8** [[TMP128]] to i64* +// CHECK14-NEXT: store i64 [[TMP1]], i64* [[TMP129]], align 8 +// CHECK14-NEXT: [[TMP130:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS54]], i64 0, i64 1 +// CHECK14-NEXT: store i8* null, i8** [[TMP130]], align 8 +// CHECK14-NEXT: [[TMP131:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS52]], i32 0, i32 2 +// CHECK14-NEXT: [[TMP132:%.*]] = bitcast i8** [[TMP131]] to i32** +// CHECK14-NEXT: store i32* [[VLA]], i32** [[TMP132]], align 8 +// CHECK14-NEXT: [[TMP133:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS53]], i32 0, i32 2 +// CHECK14-NEXT: [[TMP134:%.*]] = bitcast i8** [[TMP133]] to i32** +// CHECK14-NEXT: store i32* [[VLA]], i32** [[TMP134]], align 8 +// CHECK14-NEXT: store i64 [[TMP120]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.16, i32 0, i32 2), align 8 +// CHECK14-NEXT: [[TMP135:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS54]], i64 0, i64 2 +// CHECK14-NEXT: store i8* null, i8** [[TMP135]], align 8 +// CHECK14-NEXT: [[TMP136:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS52]], i32 0, i32 3 +// CHECK14-NEXT: [[TMP137:%.*]] = bitcast i8** [[TMP136]] to i64* +// CHECK14-NEXT: store i64 [[TMP119]], i64* [[TMP137]], align 8 +// CHECK14-NEXT: [[TMP138:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS53]], i32 0, i32 3 // CHECK14-NEXT: [[TMP139:%.*]] = bitcast i8** [[TMP138]] to i64* -// CHECK14-NEXT: store i64 [[TMP134]], i64* [[TMP139]], align 8 -// CHECK14-NEXT: [[TMP140:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS56]], i32 0, i32 0 -// CHECK14-NEXT: [[TMP141:%.*]] = bitcast i8** [[TMP140]] to i64* -// CHECK14-NEXT: store i64 [[TMP134]], i64* [[TMP141]], align 8 -// CHECK14-NEXT: [[TMP142:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES58]], i32 0, i32 0 -// CHECK14-NEXT: store i64 4, i64* [[TMP142]], align 8 -// CHECK14-NEXT: [[TMP143:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS57]], i64 0, i64 0 -// CHECK14-NEXT: store i8* null, i8** [[TMP143]], align 8 -// CHECK14-NEXT: [[TMP144:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS55]], i32 0, i32 1 -// CHECK14-NEXT: [[TMP145:%.*]] = bitcast i8** [[TMP144]] to i64* -// CHECK14-NEXT: store i64 [[TMP1]], i64* [[TMP145]], align 8 -// CHECK14-NEXT: [[TMP146:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS56]], i32 0, i32 1 -// CHECK14-NEXT: [[TMP147:%.*]] = bitcast i8** [[TMP146]] to i64* -// CHECK14-NEXT: store i64 [[TMP1]], i64* [[TMP147]], align 8 -// CHECK14-NEXT: [[TMP148:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES58]], i32 0, i32 1 -// CHECK14-NEXT: store i64 8, i64* [[TMP148]], align 8 -// CHECK14-NEXT: [[TMP149:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS57]], i64 0, i64 1 -// CHECK14-NEXT: store i8* null, i8** [[TMP149]], align 8 -// CHECK14-NEXT: [[TMP150:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS55]], i32 0, i32 2 -// 
CHECK14-NEXT: [[TMP151:%.*]] = bitcast i8** [[TMP150]] to i32** -// CHECK14-NEXT: store i32* [[VLA]], i32** [[TMP151]], align 8 -// CHECK14-NEXT: [[TMP152:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS56]], i32 0, i32 2 -// CHECK14-NEXT: [[TMP153:%.*]] = bitcast i8** [[TMP152]] to i32** -// CHECK14-NEXT: store i32* [[VLA]], i32** [[TMP153]], align 8 -// CHECK14-NEXT: [[TMP154:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES58]], i32 0, i32 2 -// CHECK14-NEXT: store i64 [[TMP137]], i64* [[TMP154]], align 8 -// CHECK14-NEXT: [[TMP155:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS57]], i64 0, i64 2 -// CHECK14-NEXT: store i8* null, i8** [[TMP155]], align 8 -// CHECK14-NEXT: [[TMP156:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS55]], i32 0, i32 3 -// CHECK14-NEXT: [[TMP157:%.*]] = bitcast i8** [[TMP156]] to i64* -// CHECK14-NEXT: store i64 [[TMP136]], i64* [[TMP157]], align 8 -// CHECK14-NEXT: [[TMP158:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS56]], i32 0, i32 3 -// CHECK14-NEXT: [[TMP159:%.*]] = bitcast i8** [[TMP158]] to i64* -// CHECK14-NEXT: store i64 [[TMP136]], i64* [[TMP159]], align 8 -// CHECK14-NEXT: [[TMP160:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES58]], i32 0, i32 3 -// CHECK14-NEXT: store i64 4, i64* [[TMP160]], align 8 -// CHECK14-NEXT: [[TMP161:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS57]], i64 0, i64 3 -// CHECK14-NEXT: store i8* null, i8** [[TMP161]], align 8 -// CHECK14-NEXT: [[TMP162:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS55]], i32 0, i32 0 -// CHECK14-NEXT: [[TMP163:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS56]], i32 0, i32 0 -// CHECK14-NEXT: [[TMP164:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES58]], i32 0, i32 0 -// CHECK14-NEXT: [[TMP165:%.*]] = load i32, i32* [[N]], align 4 -// CHECK14-NEXT: store i32 [[TMP165]], i32* [[DOTCAPTURE_EXPR_60]], align 4 -// CHECK14-NEXT: [[TMP166:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_60]], align 4 -// CHECK14-NEXT: [[SUB62:%.*]] = sub nsw i32 [[TMP166]], 0 -// CHECK14-NEXT: [[DIV63:%.*]] = sdiv i32 [[SUB62]], 1 -// CHECK14-NEXT: [[SUB64:%.*]] = sub nsw i32 [[DIV63]], 1 -// CHECK14-NEXT: store i32 [[SUB64]], i32* [[DOTCAPTURE_EXPR_61]], align 4 -// CHECK14-NEXT: [[TMP167:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_61]], align 4 -// CHECK14-NEXT: [[ADD65:%.*]] = add nsw i32 [[TMP167]], 1 -// CHECK14-NEXT: [[TMP168:%.*]] = zext i32 [[ADD65]] to i64 -// CHECK14-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP168]]) -// CHECK14-NEXT: [[TMP169:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155.region_id, i32 4, i8** [[TMP162]], i8** [[TMP163]], i64* [[TMP164]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK14-NEXT: [[TMP170:%.*]] = icmp ne i32 [[TMP169]], 0 -// CHECK14-NEXT: br i1 [[TMP170]], label [[OMP_OFFLOAD_FAILED66:%.*]], label [[OMP_OFFLOAD_CONT67:%.*]] -// CHECK14: omp_offload.failed66: -// CHECK14-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155(i64 [[TMP134]], i64 [[TMP1]], i32* [[VLA]], i64 [[TMP136]]) #[[ATTR3]] -// CHECK14-NEXT: br label [[OMP_OFFLOAD_CONT67]] -// CHECK14: omp_offload.cont67: -// CHECK14-NEXT: 
[[TMP171:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 -// CHECK14-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP171]]) +// CHECK14-NEXT: store i64 [[TMP119]], i64* [[TMP139]], align 8 +// CHECK14-NEXT: [[TMP140:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS54]], i64 0, i64 3 +// CHECK14-NEXT: store i8* null, i8** [[TMP140]], align 8 +// CHECK14-NEXT: [[TMP141:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS52]], i32 0, i32 0 +// CHECK14-NEXT: [[TMP142:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS53]], i32 0, i32 0 +// CHECK14-NEXT: [[TMP143:%.*]] = load i32, i32* [[N]], align 4 +// CHECK14-NEXT: store i32 [[TMP143]], i32* [[DOTCAPTURE_EXPR_56]], align 4 +// CHECK14-NEXT: [[TMP144:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_56]], align 4 +// CHECK14-NEXT: [[SUB58:%.*]] = sub nsw i32 [[TMP144]], 0 +// CHECK14-NEXT: [[DIV59:%.*]] = sdiv i32 [[SUB58]], 1 +// CHECK14-NEXT: [[SUB60:%.*]] = sub nsw i32 [[DIV59]], 1 +// CHECK14-NEXT: store i32 [[SUB60]], i32* [[DOTCAPTURE_EXPR_57]], align 4 +// CHECK14-NEXT: [[TMP145:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_57]], align 4 +// CHECK14-NEXT: [[ADD61:%.*]] = add nsw i32 [[TMP145]], 1 +// CHECK14-NEXT: [[TMP146:%.*]] = zext i32 [[ADD61]] to i64 +// CHECK14-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP146]]) +// CHECK14-NEXT: [[TMP147:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155.region_id, i32 4, i8** [[TMP141]], i8** [[TMP142]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.16, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK14-NEXT: [[TMP148:%.*]] = icmp ne i32 [[TMP147]], 0 +// CHECK14-NEXT: br i1 [[TMP148]], label [[OMP_OFFLOAD_FAILED62:%.*]], label [[OMP_OFFLOAD_CONT63:%.*]] +// CHECK14: omp_offload.failed62: +// CHECK14-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155(i64 [[TMP117]], i64 [[TMP1]], i32* [[VLA]], i64 [[TMP119]]) #[[ATTR3]] +// CHECK14-NEXT: br label [[OMP_OFFLOAD_CONT63]] +// CHECK14: omp_offload.cont63: +// CHECK14-NEXT: [[TMP149:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 +// CHECK14-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP149]]) // CHECK14-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK14-NEXT: [[TMP172:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK14-NEXT: call void @llvm.stackrestore(i8* [[TMP172]]) -// CHECK14-NEXT: [[TMP173:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK14-NEXT: ret i32 [[TMP173]] +// CHECK14-NEXT: [[TMP150:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK14-NEXT: call void @llvm.stackrestore(i8* [[TMP150]]) +// CHECK14-NEXT: [[TMP151:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK14-NEXT: ret i32 [[TMP151]] // // // CHECK14-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139 @@ -10772,11 +10694,11 @@ // CHECK14-NEXT: [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK14-NEXT: store i32 [[TMP4]], i32* [[CONV3]], align 4 // CHECK14-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP5]]) +// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP5]]) // CHECK14-NEXT: ret void // // -// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..5 +// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..6 // CHECK14-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK14-NEXT: entry: // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -10863,7 +10785,7 @@ // CHECK14-NEXT: [[CONV9:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK14-NEXT: store i32 [[TMP22]], i32* [[CONV9]], align 4 // CHECK14-NEXT: [[TMP23:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*, i64)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i64 [[TMP17]], i64 [[TMP19]], i64 [[TMP21]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP23]]) +// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*, i64)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i64 [[TMP17]], i64 [[TMP19]], i64 [[TMP21]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP23]]) // CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK14: omp.inner.for.inc: // CHECK14-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -10905,7 +10827,7 @@ // CHECK14-NEXT: ret void // // -// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..6 +// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..7 // CHECK14-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK14-NEXT: entry: // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -11030,11 +10952,11 @@ // CHECK14-NEXT: [[CONV1:%.*]] = bitcast i64* [[N_CASTED]] to i32* // CHECK14-NEXT: store i32 [[TMP2]], i32* [[CONV1]], align 4 // CHECK14-NEXT: [[TMP3:%.*]] = load i64, i64* [[N_CASTED]], align 8 -// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*)* @.omp_outlined..8 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]]) +// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]]) // CHECK14-NEXT: ret void // // -// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..8 +// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..10 // CHECK14-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] { // CHECK14-NEXT: entry: // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -11111,7 +11033,7 @@ // CHECK14-NEXT: [[CONV6:%.*]] = bitcast i64* [[N_CASTED]] to i32* // CHECK14-NEXT: store i32 [[TMP19]], i32* [[CONV6]], align 4 // CHECK14-NEXT: [[TMP20:%.*]] = load i64, i64* [[N_CASTED]], align 8 -// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), i64 [[TMP16]], i64 [[TMP18]], i64 [[TMP20]], i64 [[TMP0]], i32* [[TMP1]]) +// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP16]], i64 [[TMP18]], i64 [[TMP20]], i64 [[TMP0]], i32* [[TMP1]]) // CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK14: omp.inner.for.inc: // CHECK14-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -11130,7 +11052,7 @@ // CHECK14-NEXT: ret void // // -// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..9 +// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..11 // CHECK14-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] { // CHECK14-NEXT: entry: // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -11256,11 +11178,11 @@ // CHECK14-NEXT: [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK14-NEXT: store i32 [[TMP4]], i32* [[CONV3]], align 4 // CHECK14-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP5]]) +// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP5]]) // CHECK14-NEXT: ret void // // -// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..14 // CHECK14-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK14-NEXT: entry: // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -11345,7 +11267,7 @@ // CHECK14-NEXT: [[CONV9:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK14-NEXT: store i32 [[TMP21]], i32* [[CONV9]], align 4 // CHECK14-NEXT: [[TMP22:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*, i64)* @.omp_outlined..12 to void (i32*, i32*, ...)*), i64 [[TMP16]], i64 [[TMP18]], i64 [[TMP20]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP22]]) +// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*, i64)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i64 [[TMP16]], i64 [[TMP18]], i64 [[TMP20]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP22]]) // CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK14: omp.inner.for.inc: // CHECK14-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -11364,7 +11286,7 @@ // CHECK14-NEXT: ret void // // -// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..12 +// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..15 // CHECK14-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK14-NEXT: entry: // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -11512,7 +11434,7 @@ // CHECK14-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK14-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK14-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK14-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l112.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK14-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l112.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.20, i32 0, i32 0), i64* 
getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.21, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK14-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK14-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK14: omp_offload.failed: @@ -11530,7 +11452,7 @@ // CHECK14-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0 // CHECK14-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0 // CHECK14-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK14-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.19, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.20, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK14-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.24, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.25, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK14-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 // CHECK14-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]] // CHECK14: omp_offload.failed5: @@ -11562,7 +11484,7 @@ // CHECK14-NEXT: [[TMP31:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0 // CHECK14-NEXT: [[TMP32:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0 // CHECK14-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK14-NEXT: [[TMP33:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l120.region_id, i32 2, i8** [[TMP31]], i8** [[TMP32]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.23, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.24, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK14-NEXT: [[TMP33:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l120.region_id, i32 2, i8** [[TMP31]], i8** [[TMP32]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.28, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.29, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK14-NEXT: [[TMP34:%.*]] = icmp ne i32 [[TMP33]], 0 // CHECK14-NEXT: br i1 [[TMP34]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]] // CHECK14: omp_offload.failed11: @@ -11580,7 +11502,7 @@ // CHECK14-NEXT: [[TMP40:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 0 // CHECK14-NEXT: [[TMP41:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 0 // CHECK14-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// 
CHECK14-NEXT: [[TMP42:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l124.region_id, i32 1, i8** [[TMP40]], i8** [[TMP41]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.27, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.28, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK14-NEXT: [[TMP42:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l124.region_id, i32 1, i8** [[TMP40]], i8** [[TMP41]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.32, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.33, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK14-NEXT: [[TMP43:%.*]] = icmp ne i32 [[TMP42]], 0 // CHECK14-NEXT: br i1 [[TMP43]], label [[OMP_OFFLOAD_FAILED17:%.*]], label [[OMP_OFFLOAD_CONT18:%.*]] // CHECK14: omp_offload.failed17: @@ -11612,7 +11534,7 @@ // CHECK14-NEXT: [[TMP57:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 0 // CHECK14-NEXT: [[TMP58:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 0 // CHECK14-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK14-NEXT: [[TMP59:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l128.region_id, i32 2, i8** [[TMP57]], i8** [[TMP58]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.31, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.32, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK14-NEXT: [[TMP59:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l128.region_id, i32 2, i8** [[TMP57]], i8** [[TMP58]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.36, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.37, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK14-NEXT: [[TMP60:%.*]] = icmp ne i32 [[TMP59]], 0 // CHECK14-NEXT: br i1 [[TMP60]], label [[OMP_OFFLOAD_FAILED26:%.*]], label [[OMP_OFFLOAD_CONT27:%.*]] // CHECK14: omp_offload.failed26: @@ -11628,11 +11550,11 @@ // CHECK14-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK14-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 // CHECK14-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 -// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK14-NEXT: ret void // // -// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..14 +// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..18 // CHECK14-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK14-NEXT: entry: // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -11680,7 +11602,7 @@ // CHECK14-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 // CHECK14-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 // CHECK14-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]) +// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..19 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]) // CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK14: omp.inner.for.inc: // CHECK14-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -11695,7 +11617,7 @@ // CHECK14-NEXT: ret void // // -// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..15 +// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..19 // CHECK14-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK14-NEXT: entry: // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -11778,11 +11700,11 @@ // CHECK14-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK14-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 // CHECK14-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 -// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..17 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..22 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK14-NEXT: ret void // // -// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..17 +// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..22 // CHECK14-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK14-NEXT: entry: // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -11830,7 +11752,7 @@ // CHECK14-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 // CHECK14-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 // CHECK14-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]) +// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..23 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]) // CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK14: omp.inner.for.inc: // CHECK14-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -11845,7 +11767,7 @@ // CHECK14-NEXT: ret void // // -// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..18 +// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..23 // CHECK14-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK14-NEXT: entry: // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -11936,11 +11858,11 @@ // CHECK14-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK14-NEXT: store i32 [[TMP1]], i32* [[CONV1]], align 4 // CHECK14-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..21 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP2]]) +// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..26 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP2]]) // CHECK14-NEXT: ret void // // -// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..21 +// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..26 // CHECK14-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK14-NEXT: entry: // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -11996,7 +11918,7 @@ // CHECK14-NEXT: [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK14-NEXT: store i32 [[TMP12]], i32* [[CONV2]], align 4 // CHECK14-NEXT: [[TMP13:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..22 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]]) +// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..27 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]]) // CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK14: omp.inner.for.inc: // CHECK14-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -12011,7 +11933,7 @@ // CHECK14-NEXT: ret void // // -// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..22 +// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..27 // CHECK14-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK14-NEXT: entry: // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -12119,11 +12041,11 @@ // CHECK14-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK14-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 // CHECK14-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 -// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..25 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..30 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK14-NEXT: ret void // // -// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..25 +// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..30 // CHECK14-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK14-NEXT: entry: // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -12171,7 +12093,7 @@ // CHECK14-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 // CHECK14-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 // CHECK14-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..26 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]) +// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..31 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]) // CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK14: omp.inner.for.inc: // CHECK14-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -12186,7 +12108,7 @@ // CHECK14-NEXT: ret void // // -// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..26 +// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..31 // CHECK14-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK14-NEXT: entry: // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -12275,11 +12197,11 @@ // CHECK14-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK14-NEXT: store i32 [[TMP1]], i32* [[CONV1]], align 4 // CHECK14-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..29 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP2]]) +// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..34 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP2]]) // CHECK14-NEXT: ret void // // -// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..29 +// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..34 // CHECK14-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK14-NEXT: entry: // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -12335,7 +12257,7 @@ // CHECK14-NEXT: [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK14-NEXT: store i32 [[TMP12]], i32* [[CONV2]], align 4 // CHECK14-NEXT: [[TMP13:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..30 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]]) +// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..35 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]]) // CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK14: omp.inner.for.inc: // CHECK14-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -12350,7 +12272,7 @@ // CHECK14-NEXT: ret void // // -// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..30 +// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..35 // CHECK14-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK14-NEXT: entry: // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -12450,7 +12372,6 @@ // CHECK15-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK15-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK15-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4 -// CHECK15-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 4 // CHECK15-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK15-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK15-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -12458,38 +12379,34 @@ // CHECK15-NEXT: [[DOTOFFLOAD_BASEPTRS4:%.*]] = alloca [3 x i8*], align 4 // CHECK15-NEXT: [[DOTOFFLOAD_PTRS5:%.*]] = alloca [3 x i8*], align 4 // CHECK15-NEXT: [[DOTOFFLOAD_MAPPERS6:%.*]] = alloca [3 x i8*], align 4 -// CHECK15-NEXT: [[DOTOFFLOAD_SIZES7:%.*]] = alloca [3 x i64], align 4 -// CHECK15-NEXT: [[_TMP8:%.*]] = alloca i32, align 4 +// CHECK15-NEXT: [[_TMP7:%.*]] = alloca i32, align 4 +// CHECK15-NEXT: [[DOTCAPTURE_EXPR_8:%.*]] = alloca i32, align 4 // CHECK15-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4 -// CHECK15-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4 -// CHECK15-NEXT: [[DOTCAPTURE_EXPR_17:%.*]] = alloca i32, align 4 -// CHECK15-NEXT: [[N_CASTED18:%.*]] = alloca i32, align 4 +// CHECK15-NEXT: 
[[DOTCAPTURE_EXPR_16:%.*]] = alloca i32, align 4 +// CHECK15-NEXT: [[N_CASTED17:%.*]] = alloca i32, align 4 // CHECK15-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4 -// CHECK15-NEXT: [[DOTOFFLOAD_BASEPTRS19:%.*]] = alloca [4 x i8*], align 4 -// CHECK15-NEXT: [[DOTOFFLOAD_PTRS20:%.*]] = alloca [4 x i8*], align 4 -// CHECK15-NEXT: [[DOTOFFLOAD_MAPPERS21:%.*]] = alloca [4 x i8*], align 4 -// CHECK15-NEXT: [[DOTOFFLOAD_SIZES22:%.*]] = alloca [4 x i64], align 4 -// CHECK15-NEXT: [[_TMP23:%.*]] = alloca i32, align 4 -// CHECK15-NEXT: [[DOTCAPTURE_EXPR_24:%.*]] = alloca i32, align 4 -// CHECK15-NEXT: [[DOTCAPTURE_EXPR_25:%.*]] = alloca i32, align 4 -// CHECK15-NEXT: [[N_CASTED32:%.*]] = alloca i32, align 4 -// CHECK15-NEXT: [[DOTOFFLOAD_BASEPTRS33:%.*]] = alloca [3 x i8*], align 4 -// CHECK15-NEXT: [[DOTOFFLOAD_PTRS34:%.*]] = alloca [3 x i8*], align 4 -// CHECK15-NEXT: [[DOTOFFLOAD_MAPPERS35:%.*]] = alloca [3 x i8*], align 4 -// CHECK15-NEXT: [[DOTOFFLOAD_SIZES36:%.*]] = alloca [3 x i64], align 4 -// CHECK15-NEXT: [[_TMP37:%.*]] = alloca i32, align 4 -// CHECK15-NEXT: [[DOTCAPTURE_EXPR_38:%.*]] = alloca i32, align 4 -// CHECK15-NEXT: [[DOTCAPTURE_EXPR_39:%.*]] = alloca i32, align 4 -// CHECK15-NEXT: [[DOTCAPTURE_EXPR_46:%.*]] = alloca i32, align 4 -// CHECK15-NEXT: [[N_CASTED47:%.*]] = alloca i32, align 4 -// CHECK15-NEXT: [[DOTCAPTURE_EXPR__CASTED48:%.*]] = alloca i32, align 4 -// CHECK15-NEXT: [[DOTOFFLOAD_BASEPTRS49:%.*]] = alloca [4 x i8*], align 4 -// CHECK15-NEXT: [[DOTOFFLOAD_PTRS50:%.*]] = alloca [4 x i8*], align 4 -// CHECK15-NEXT: [[DOTOFFLOAD_MAPPERS51:%.*]] = alloca [4 x i8*], align 4 -// CHECK15-NEXT: [[DOTOFFLOAD_SIZES52:%.*]] = alloca [4 x i64], align 4 -// CHECK15-NEXT: [[_TMP53:%.*]] = alloca i32, align 4 -// CHECK15-NEXT: [[DOTCAPTURE_EXPR_54:%.*]] = alloca i32, align 4 -// CHECK15-NEXT: [[DOTCAPTURE_EXPR_55:%.*]] = alloca i32, align 4 +// CHECK15-NEXT: [[DOTOFFLOAD_BASEPTRS18:%.*]] = alloca [4 x i8*], align 4 +// CHECK15-NEXT: [[DOTOFFLOAD_PTRS19:%.*]] = alloca [4 x i8*], align 4 +// CHECK15-NEXT: [[DOTOFFLOAD_MAPPERS20:%.*]] = alloca [4 x i8*], align 4 +// CHECK15-NEXT: [[_TMP21:%.*]] = alloca i32, align 4 +// CHECK15-NEXT: [[DOTCAPTURE_EXPR_22:%.*]] = alloca i32, align 4 +// CHECK15-NEXT: [[DOTCAPTURE_EXPR_23:%.*]] = alloca i32, align 4 +// CHECK15-NEXT: [[N_CASTED30:%.*]] = alloca i32, align 4 +// CHECK15-NEXT: [[DOTOFFLOAD_BASEPTRS31:%.*]] = alloca [3 x i8*], align 4 +// CHECK15-NEXT: [[DOTOFFLOAD_PTRS32:%.*]] = alloca [3 x i8*], align 4 +// CHECK15-NEXT: [[DOTOFFLOAD_MAPPERS33:%.*]] = alloca [3 x i8*], align 4 +// CHECK15-NEXT: [[_TMP34:%.*]] = alloca i32, align 4 +// CHECK15-NEXT: [[DOTCAPTURE_EXPR_35:%.*]] = alloca i32, align 4 +// CHECK15-NEXT: [[DOTCAPTURE_EXPR_36:%.*]] = alloca i32, align 4 +// CHECK15-NEXT: [[DOTCAPTURE_EXPR_43:%.*]] = alloca i32, align 4 +// CHECK15-NEXT: [[N_CASTED44:%.*]] = alloca i32, align 4 +// CHECK15-NEXT: [[DOTCAPTURE_EXPR__CASTED45:%.*]] = alloca i32, align 4 +// CHECK15-NEXT: [[DOTOFFLOAD_BASEPTRS46:%.*]] = alloca [4 x i8*], align 4 +// CHECK15-NEXT: [[DOTOFFLOAD_PTRS47:%.*]] = alloca [4 x i8*], align 4 +// CHECK15-NEXT: [[DOTOFFLOAD_MAPPERS48:%.*]] = alloca [4 x i8*], align 4 +// CHECK15-NEXT: [[_TMP49:%.*]] = alloca i32, align 4 +// CHECK15-NEXT: [[DOTCAPTURE_EXPR_50:%.*]] = alloca i32, align 4 +// CHECK15-NEXT: [[DOTCAPTURE_EXPR_51:%.*]] = alloca i32, align 4 // CHECK15-NEXT: store i32 0, i32* [[RETVAL]], align 4 // CHECK15-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 // CHECK15-NEXT: store i8** [[ARGV]], i8*** 
[[ARGV_ADDR]], align 4 @@ -12511,312 +12428,278 @@ // CHECK15-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK15-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i32* // CHECK15-NEXT: store i32 [[TMP3]], i32* [[TMP9]], align 4 -// CHECK15-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK15-NEXT: store i64 4, i64* [[TMP10]], align 4 -// CHECK15-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK15-NEXT: store i8* null, i8** [[TMP11]], align 4 -// CHECK15-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK15-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32* -// CHECK15-NEXT: store i32 [[TMP0]], i32* [[TMP13]], align 4 -// CHECK15-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK15-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32* -// CHECK15-NEXT: store i32 [[TMP0]], i32* [[TMP15]], align 4 -// CHECK15-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK15-NEXT: store i64 4, i64* [[TMP16]], align 4 -// CHECK15-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK15-NEXT: store i8* null, i8** [[TMP17]], align 4 -// CHECK15-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK15-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK15-NEXT: store i8* null, i8** [[TMP10]], align 4 +// CHECK15-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK15-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i32* +// CHECK15-NEXT: store i32 [[TMP0]], i32* [[TMP12]], align 4 +// CHECK15-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK15-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i32* +// CHECK15-NEXT: store i32 [[TMP0]], i32* [[TMP14]], align 4 +// CHECK15-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK15-NEXT: store i8* null, i8** [[TMP15]], align 4 +// CHECK15-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK15-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** +// CHECK15-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 4 +// CHECK15-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK15-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** // CHECK15-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 4 -// CHECK15-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK15-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** -// CHECK15-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 4 -// CHECK15-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK15-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 4 -// CHECK15-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK15-NEXT: store i8* null, i8** [[TMP23]], align 4 -// CHECK15-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x 
i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4 -// CHECK15-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK15-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK15-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 +// CHECK15-NEXT: store i64 [[TMP5]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 2), align 4 +// CHECK15-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK15-NEXT: store i8* null, i8** [[TMP20]], align 4 +// CHECK15-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK15-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK15-NEXT: [[TMP23:%.*]] = load i32, i32* [[N]], align 4 +// CHECK15-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK15-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK15-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK15-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK15-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK15-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK15-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK15-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1 -// CHECK15-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 -// CHECK15-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP30]]) -// CHECK15-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK15-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 -// CHECK15-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK15-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK15-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], 1 +// CHECK15-NEXT: [[TMP26:%.*]] = zext i32 [[ADD]] to i64 +// CHECK15-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP26]]) +// CHECK15-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK15-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK15-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK15: omp_offload.failed: // CHECK15-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139(i32 [[TMP3]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK15-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK15: omp_offload.cont: -// CHECK15-NEXT: [[TMP33:%.*]] = load i32, i32* 
[[N]], align 4 -// CHECK15-NEXT: store i32 [[TMP33]], i32* [[N_CASTED3]], align 4 -// CHECK15-NEXT: [[TMP34:%.*]] = load i32, i32* [[N_CASTED3]], align 4 -// CHECK15-NEXT: [[TMP35:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK15-NEXT: [[TMP36:%.*]] = sext i32 [[TMP35]] to i64 -// CHECK15-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i32* -// CHECK15-NEXT: store i32 [[TMP34]], i32* [[TMP38]], align 4 -// CHECK15-NEXT: [[TMP39:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i32* -// CHECK15-NEXT: store i32 [[TMP34]], i32* [[TMP40]], align 4 -// CHECK15-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 0 -// CHECK15-NEXT: store i64 4, i64* [[TMP41]], align 4 -// CHECK15-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0 +// CHECK15-NEXT: [[TMP29:%.*]] = load i32, i32* [[N]], align 4 +// CHECK15-NEXT: store i32 [[TMP29]], i32* [[N_CASTED3]], align 4 +// CHECK15-NEXT: [[TMP30:%.*]] = load i32, i32* [[N_CASTED3]], align 4 +// CHECK15-NEXT: [[TMP31:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK15-NEXT: [[TMP32:%.*]] = sext i32 [[TMP31]] to i64 +// CHECK15-NEXT: [[TMP33:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 +// CHECK15-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i32* +// CHECK15-NEXT: store i32 [[TMP30]], i32* [[TMP34]], align 4 +// CHECK15-NEXT: [[TMP35:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 +// CHECK15-NEXT: [[TMP36:%.*]] = bitcast i8** [[TMP35]] to i32* +// CHECK15-NEXT: store i32 [[TMP30]], i32* [[TMP36]], align 4 +// CHECK15-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0 +// CHECK15-NEXT: store i8* null, i8** [[TMP37]], align 4 +// CHECK15-NEXT: [[TMP38:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1 +// CHECK15-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i32* +// CHECK15-NEXT: store i32 [[TMP0]], i32* [[TMP39]], align 4 +// CHECK15-NEXT: [[TMP40:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 1 +// CHECK15-NEXT: [[TMP41:%.*]] = bitcast i8** [[TMP40]] to i32* +// CHECK15-NEXT: store i32 [[TMP0]], i32* [[TMP41]], align 4 +// CHECK15-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1 // CHECK15-NEXT: store i8* null, i8** [[TMP42]], align 4 -// CHECK15-NEXT: [[TMP43:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1 -// CHECK15-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32* -// CHECK15-NEXT: store i32 [[TMP0]], i32* [[TMP44]], align 4 -// CHECK15-NEXT: [[TMP45:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 1 -// CHECK15-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32* -// CHECK15-NEXT: store i32 [[TMP0]], i32* [[TMP46]], align 4 -// CHECK15-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 1 -// CHECK15-NEXT: store i64 4, i64* [[TMP47]], align 4 -// CHECK15-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1 -// CHECK15-NEXT: store i8* null, i8** [[TMP48]], align 4 -// CHECK15-NEXT: [[TMP49:%.*]] 
= getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2 -// CHECK15-NEXT: [[TMP50:%.*]] = bitcast i8** [[TMP49]] to i32** -// CHECK15-NEXT: store i32* [[VLA]], i32** [[TMP50]], align 4 -// CHECK15-NEXT: [[TMP51:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 2 -// CHECK15-NEXT: [[TMP52:%.*]] = bitcast i8** [[TMP51]] to i32** -// CHECK15-NEXT: store i32* [[VLA]], i32** [[TMP52]], align 4 -// CHECK15-NEXT: [[TMP53:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 2 -// CHECK15-NEXT: store i64 [[TMP36]], i64* [[TMP53]], align 4 -// CHECK15-NEXT: [[TMP54:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 2 -// CHECK15-NEXT: store i8* null, i8** [[TMP54]], align 4 -// CHECK15-NEXT: [[TMP55:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP56:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP57:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP58:%.*]] = load i32, i32* [[N]], align 4 -// CHECK15-NEXT: store i32 [[TMP58]], i32* [[DOTCAPTURE_EXPR_9]], align 4 -// CHECK15-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 -// CHECK15-NEXT: [[SUB11:%.*]] = sub nsw i32 [[TMP59]], 0 -// CHECK15-NEXT: [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1 -// CHECK15-NEXT: [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1 -// CHECK15-NEXT: store i32 [[SUB13]], i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK15-NEXT: [[TMP60:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK15-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP60]], 1 -// CHECK15-NEXT: [[TMP61:%.*]] = zext i32 [[ADD14]] to i64 -// CHECK15-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP61]]) -// CHECK15-NEXT: [[TMP62:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143.region_id, i32 3, i8** [[TMP55]], i8** [[TMP56]], i64* [[TMP57]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK15-NEXT: [[TMP63:%.*]] = icmp ne i32 [[TMP62]], 0 -// CHECK15-NEXT: br i1 [[TMP63]], label [[OMP_OFFLOAD_FAILED15:%.*]], label [[OMP_OFFLOAD_CONT16:%.*]] -// CHECK15: omp_offload.failed15: -// CHECK15-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143(i32 [[TMP34]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] -// CHECK15-NEXT: br label [[OMP_OFFLOAD_CONT16]] -// CHECK15: omp_offload.cont16: -// CHECK15-NEXT: [[TMP64:%.*]] = load i32, i32* [[M]], align 4 -// CHECK15-NEXT: store i32 [[TMP64]], i32* [[DOTCAPTURE_EXPR_17]], align 4 -// CHECK15-NEXT: [[TMP65:%.*]] = load i32, i32* [[N]], align 4 -// CHECK15-NEXT: store i32 [[TMP65]], i32* [[N_CASTED18]], align 4 -// CHECK15-NEXT: [[TMP66:%.*]] = load i32, i32* [[N_CASTED18]], align 4 -// CHECK15-NEXT: [[TMP67:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_17]], align 4 -// CHECK15-NEXT: store i32 [[TMP67]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK15-NEXT: [[TMP68:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK15-NEXT: [[TMP69:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK15-NEXT: [[TMP70:%.*]] = sext i32 [[TMP69]] to i64 -// CHECK15-NEXT: [[TMP71:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 0 -// 
CHECK15-NEXT: [[TMP72:%.*]] = bitcast i8** [[TMP71]] to i32* -// CHECK15-NEXT: store i32 [[TMP66]], i32* [[TMP72]], align 4 -// CHECK15-NEXT: [[TMP73:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP74:%.*]] = bitcast i8** [[TMP73]] to i32* -// CHECK15-NEXT: store i32 [[TMP66]], i32* [[TMP74]], align 4 -// CHECK15-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 0 -// CHECK15-NEXT: store i64 4, i64* [[TMP75]], align 4 -// CHECK15-NEXT: [[TMP76:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 0 -// CHECK15-NEXT: store i8* null, i8** [[TMP76]], align 4 -// CHECK15-NEXT: [[TMP77:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 1 -// CHECK15-NEXT: [[TMP78:%.*]] = bitcast i8** [[TMP77]] to i32* -// CHECK15-NEXT: store i32 [[TMP0]], i32* [[TMP78]], align 4 -// CHECK15-NEXT: [[TMP79:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 1 -// CHECK15-NEXT: [[TMP80:%.*]] = bitcast i8** [[TMP79]] to i32* -// CHECK15-NEXT: store i32 [[TMP0]], i32* [[TMP80]], align 4 -// CHECK15-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 1 -// CHECK15-NEXT: store i64 4, i64* [[TMP81]], align 4 -// CHECK15-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 1 +// CHECK15-NEXT: [[TMP43:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2 +// CHECK15-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32** +// CHECK15-NEXT: store i32* [[VLA]], i32** [[TMP44]], align 4 +// CHECK15-NEXT: [[TMP45:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 2 +// CHECK15-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32** +// CHECK15-NEXT: store i32* [[VLA]], i32** [[TMP46]], align 4 +// CHECK15-NEXT: store i64 [[TMP32]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 2), align 4 +// CHECK15-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 2 +// CHECK15-NEXT: store i8* null, i8** [[TMP47]], align 4 +// CHECK15-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 +// CHECK15-NEXT: [[TMP49:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 +// CHECK15-NEXT: [[TMP50:%.*]] = load i32, i32* [[N]], align 4 +// CHECK15-NEXT: store i32 [[TMP50]], i32* [[DOTCAPTURE_EXPR_8]], align 4 +// CHECK15-NEXT: [[TMP51:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_8]], align 4 +// CHECK15-NEXT: [[SUB10:%.*]] = sub nsw i32 [[TMP51]], 0 +// CHECK15-NEXT: [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1 +// CHECK15-NEXT: [[SUB12:%.*]] = sub nsw i32 [[DIV11]], 1 +// CHECK15-NEXT: store i32 [[SUB12]], i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK15-NEXT: [[TMP52:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK15-NEXT: [[ADD13:%.*]] = add nsw i32 [[TMP52]], 1 +// CHECK15-NEXT: [[TMP53:%.*]] = zext i32 [[ADD13]] to i64 +// CHECK15-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP53]]) +// CHECK15-NEXT: [[TMP54:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143.region_id, i32 3, i8** [[TMP48]], i8** [[TMP49]], i64* getelementptr inbounds ([3 x 
i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK15-NEXT: [[TMP55:%.*]] = icmp ne i32 [[TMP54]], 0 +// CHECK15-NEXT: br i1 [[TMP55]], label [[OMP_OFFLOAD_FAILED14:%.*]], label [[OMP_OFFLOAD_CONT15:%.*]] +// CHECK15: omp_offload.failed14: +// CHECK15-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143(i32 [[TMP30]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] +// CHECK15-NEXT: br label [[OMP_OFFLOAD_CONT15]] +// CHECK15: omp_offload.cont15: +// CHECK15-NEXT: [[TMP56:%.*]] = load i32, i32* [[M]], align 4 +// CHECK15-NEXT: store i32 [[TMP56]], i32* [[DOTCAPTURE_EXPR_16]], align 4 +// CHECK15-NEXT: [[TMP57:%.*]] = load i32, i32* [[N]], align 4 +// CHECK15-NEXT: store i32 [[TMP57]], i32* [[N_CASTED17]], align 4 +// CHECK15-NEXT: [[TMP58:%.*]] = load i32, i32* [[N_CASTED17]], align 4 +// CHECK15-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_16]], align 4 +// CHECK15-NEXT: store i32 [[TMP59]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 +// CHECK15-NEXT: [[TMP60:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 +// CHECK15-NEXT: [[TMP61:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK15-NEXT: [[TMP62:%.*]] = sext i32 [[TMP61]] to i64 +// CHECK15-NEXT: [[TMP63:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 0 +// CHECK15-NEXT: [[TMP64:%.*]] = bitcast i8** [[TMP63]] to i32* +// CHECK15-NEXT: store i32 [[TMP58]], i32* [[TMP64]], align 4 +// CHECK15-NEXT: [[TMP65:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 0 +// CHECK15-NEXT: [[TMP66:%.*]] = bitcast i8** [[TMP65]] to i32* +// CHECK15-NEXT: store i32 [[TMP58]], i32* [[TMP66]], align 4 +// CHECK15-NEXT: [[TMP67:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 0 +// CHECK15-NEXT: store i8* null, i8** [[TMP67]], align 4 +// CHECK15-NEXT: [[TMP68:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 1 +// CHECK15-NEXT: [[TMP69:%.*]] = bitcast i8** [[TMP68]] to i32* +// CHECK15-NEXT: store i32 [[TMP0]], i32* [[TMP69]], align 4 +// CHECK15-NEXT: [[TMP70:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 1 +// CHECK15-NEXT: [[TMP71:%.*]] = bitcast i8** [[TMP70]] to i32* +// CHECK15-NEXT: store i32 [[TMP0]], i32* [[TMP71]], align 4 +// CHECK15-NEXT: [[TMP72:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 1 +// CHECK15-NEXT: store i8* null, i8** [[TMP72]], align 4 +// CHECK15-NEXT: [[TMP73:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 2 +// CHECK15-NEXT: [[TMP74:%.*]] = bitcast i8** [[TMP73]] to i32** +// CHECK15-NEXT: store i32* [[VLA]], i32** [[TMP74]], align 4 +// CHECK15-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 2 +// CHECK15-NEXT: [[TMP76:%.*]] = bitcast i8** [[TMP75]] to i32** +// CHECK15-NEXT: store i32* [[VLA]], i32** [[TMP76]], align 4 +// CHECK15-NEXT: store i64 [[TMP62]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 2), align 4 +// CHECK15-NEXT: [[TMP77:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 2 +// CHECK15-NEXT: store i8* null, i8** [[TMP77]], align 4 +// CHECK15-NEXT: [[TMP78:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 3 +// 
CHECK15-NEXT: [[TMP79:%.*]] = bitcast i8** [[TMP78]] to i32* +// CHECK15-NEXT: store i32 [[TMP60]], i32* [[TMP79]], align 4 +// CHECK15-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 3 +// CHECK15-NEXT: [[TMP81:%.*]] = bitcast i8** [[TMP80]] to i32* +// CHECK15-NEXT: store i32 [[TMP60]], i32* [[TMP81]], align 4 +// CHECK15-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 3 // CHECK15-NEXT: store i8* null, i8** [[TMP82]], align 4 -// CHECK15-NEXT: [[TMP83:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 2 -// CHECK15-NEXT: [[TMP84:%.*]] = bitcast i8** [[TMP83]] to i32** -// CHECK15-NEXT: store i32* [[VLA]], i32** [[TMP84]], align 4 -// CHECK15-NEXT: [[TMP85:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 2 -// CHECK15-NEXT: [[TMP86:%.*]] = bitcast i8** [[TMP85]] to i32** -// CHECK15-NEXT: store i32* [[VLA]], i32** [[TMP86]], align 4 -// CHECK15-NEXT: [[TMP87:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 2 -// CHECK15-NEXT: store i64 [[TMP70]], i64* [[TMP87]], align 4 -// CHECK15-NEXT: [[TMP88:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 2 -// CHECK15-NEXT: store i8* null, i8** [[TMP88]], align 4 -// CHECK15-NEXT: [[TMP89:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 3 -// CHECK15-NEXT: [[TMP90:%.*]] = bitcast i8** [[TMP89]] to i32* -// CHECK15-NEXT: store i32 [[TMP68]], i32* [[TMP90]], align 4 -// CHECK15-NEXT: [[TMP91:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 3 -// CHECK15-NEXT: [[TMP92:%.*]] = bitcast i8** [[TMP91]] to i32* -// CHECK15-NEXT: store i32 [[TMP68]], i32* [[TMP92]], align 4 -// CHECK15-NEXT: [[TMP93:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 3 -// CHECK15-NEXT: store i64 4, i64* [[TMP93]], align 4 -// CHECK15-NEXT: [[TMP94:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 3 -// CHECK15-NEXT: store i8* null, i8** [[TMP94]], align 4 -// CHECK15-NEXT: [[TMP95:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP96:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP97:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP98:%.*]] = load i32, i32* [[N]], align 4 -// CHECK15-NEXT: store i32 [[TMP98]], i32* [[DOTCAPTURE_EXPR_24]], align 4 -// CHECK15-NEXT: [[TMP99:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_24]], align 4 -// CHECK15-NEXT: [[SUB26:%.*]] = sub nsw i32 [[TMP99]], 0 -// CHECK15-NEXT: [[DIV27:%.*]] = sdiv i32 [[SUB26]], 1 -// CHECK15-NEXT: [[SUB28:%.*]] = sub nsw i32 [[DIV27]], 1 -// CHECK15-NEXT: store i32 [[SUB28]], i32* [[DOTCAPTURE_EXPR_25]], align 4 -// CHECK15-NEXT: [[TMP100:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_25]], align 4 -// CHECK15-NEXT: [[ADD29:%.*]] = add nsw i32 [[TMP100]], 1 -// CHECK15-NEXT: [[TMP101:%.*]] = zext i32 [[ADD29]] to i64 -// CHECK15-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP101]]) -// CHECK15-NEXT: [[TMP102:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147.region_id, i32 4, 
i8** [[TMP95]], i8** [[TMP96]], i64* [[TMP97]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.7, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK15-NEXT: [[TMP103:%.*]] = icmp ne i32 [[TMP102]], 0 -// CHECK15-NEXT: br i1 [[TMP103]], label [[OMP_OFFLOAD_FAILED30:%.*]], label [[OMP_OFFLOAD_CONT31:%.*]] -// CHECK15: omp_offload.failed30: -// CHECK15-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147(i32 [[TMP66]], i32 [[TMP0]], i32* [[VLA]], i32 [[TMP68]]) #[[ATTR3]] -// CHECK15-NEXT: br label [[OMP_OFFLOAD_CONT31]] -// CHECK15: omp_offload.cont31: -// CHECK15-NEXT: [[TMP104:%.*]] = load i32, i32* [[N]], align 4 -// CHECK15-NEXT: store i32 [[TMP104]], i32* [[N_CASTED32]], align 4 -// CHECK15-NEXT: [[TMP105:%.*]] = load i32, i32* [[N_CASTED32]], align 4 -// CHECK15-NEXT: [[TMP106:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK15-NEXT: [[TMP107:%.*]] = sext i32 [[TMP106]] to i64 -// CHECK15-NEXT: [[TMP108:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP109:%.*]] = bitcast i8** [[TMP108]] to i32* -// CHECK15-NEXT: store i32 [[TMP105]], i32* [[TMP109]], align 4 -// CHECK15-NEXT: [[TMP110:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP111:%.*]] = bitcast i8** [[TMP110]] to i32* -// CHECK15-NEXT: store i32 [[TMP105]], i32* [[TMP111]], align 4 -// CHECK15-NEXT: [[TMP112:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES36]], i32 0, i32 0 -// CHECK15-NEXT: store i64 4, i64* [[TMP112]], align 4 -// CHECK15-NEXT: [[TMP113:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS35]], i32 0, i32 0 -// CHECK15-NEXT: store i8* null, i8** [[TMP113]], align 4 -// CHECK15-NEXT: [[TMP114:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 1 -// CHECK15-NEXT: [[TMP115:%.*]] = bitcast i8** [[TMP114]] to i32* -// CHECK15-NEXT: store i32 [[TMP0]], i32* [[TMP115]], align 4 -// CHECK15-NEXT: [[TMP116:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 1 -// CHECK15-NEXT: [[TMP117:%.*]] = bitcast i8** [[TMP116]] to i32* -// CHECK15-NEXT: store i32 [[TMP0]], i32* [[TMP117]], align 4 -// CHECK15-NEXT: [[TMP118:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES36]], i32 0, i32 1 -// CHECK15-NEXT: store i64 4, i64* [[TMP118]], align 4 -// CHECK15-NEXT: [[TMP119:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS35]], i32 0, i32 1 -// CHECK15-NEXT: store i8* null, i8** [[TMP119]], align 4 -// CHECK15-NEXT: [[TMP120:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 2 -// CHECK15-NEXT: [[TMP121:%.*]] = bitcast i8** [[TMP120]] to i32** -// CHECK15-NEXT: store i32* [[VLA]], i32** [[TMP121]], align 4 -// CHECK15-NEXT: [[TMP122:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 2 -// CHECK15-NEXT: [[TMP123:%.*]] = bitcast i8** [[TMP122]] to i32** -// CHECK15-NEXT: store i32* [[VLA]], i32** [[TMP123]], align 4 -// CHECK15-NEXT: [[TMP124:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES36]], i32 0, i32 2 -// CHECK15-NEXT: store i64 [[TMP107]], i64* [[TMP124]], align 4 -// CHECK15-NEXT: [[TMP125:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS35]], i32 0, i32 2 -// CHECK15-NEXT: store i8* null, i8** [[TMP125]], align 4 -// CHECK15-NEXT: [[TMP126:%.*]] = getelementptr inbounds [3 x 
i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP127:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP128:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES36]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP129:%.*]] = load i32, i32* [[N]], align 4 -// CHECK15-NEXT: store i32 [[TMP129]], i32* [[DOTCAPTURE_EXPR_38]], align 4 -// CHECK15-NEXT: [[TMP130:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_38]], align 4 -// CHECK15-NEXT: [[SUB40:%.*]] = sub nsw i32 [[TMP130]], 0 -// CHECK15-NEXT: [[DIV41:%.*]] = sdiv i32 [[SUB40]], 1 -// CHECK15-NEXT: [[SUB42:%.*]] = sub nsw i32 [[DIV41]], 1 -// CHECK15-NEXT: store i32 [[SUB42]], i32* [[DOTCAPTURE_EXPR_39]], align 4 -// CHECK15-NEXT: [[TMP131:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_39]], align 4 -// CHECK15-NEXT: [[ADD43:%.*]] = add nsw i32 [[TMP131]], 1 -// CHECK15-NEXT: [[TMP132:%.*]] = zext i32 [[ADD43]] to i64 -// CHECK15-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP132]]) -// CHECK15-NEXT: [[TMP133:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151.region_id, i32 3, i8** [[TMP126]], i8** [[TMP127]], i64* [[TMP128]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK15-NEXT: [[TMP134:%.*]] = icmp ne i32 [[TMP133]], 0 -// CHECK15-NEXT: br i1 [[TMP134]], label [[OMP_OFFLOAD_FAILED44:%.*]], label [[OMP_OFFLOAD_CONT45:%.*]] -// CHECK15: omp_offload.failed44: -// CHECK15-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151(i32 [[TMP105]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] -// CHECK15-NEXT: br label [[OMP_OFFLOAD_CONT45]] -// CHECK15: omp_offload.cont45: -// CHECK15-NEXT: [[TMP135:%.*]] = load i32, i32* [[M]], align 4 -// CHECK15-NEXT: store i32 [[TMP135]], i32* [[DOTCAPTURE_EXPR_46]], align 4 -// CHECK15-NEXT: [[TMP136:%.*]] = load i32, i32* [[N]], align 4 -// CHECK15-NEXT: store i32 [[TMP136]], i32* [[N_CASTED47]], align 4 -// CHECK15-NEXT: [[TMP137:%.*]] = load i32, i32* [[N_CASTED47]], align 4 -// CHECK15-NEXT: [[TMP138:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_46]], align 4 -// CHECK15-NEXT: store i32 [[TMP138]], i32* [[DOTCAPTURE_EXPR__CASTED48]], align 4 -// CHECK15-NEXT: [[TMP139:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED48]], align 4 -// CHECK15-NEXT: [[TMP140:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK15-NEXT: [[TMP141:%.*]] = sext i32 [[TMP140]] to i64 -// CHECK15-NEXT: [[TMP142:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS49]], i32 0, i32 0 +// CHECK15-NEXT: [[TMP83:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 0 +// CHECK15-NEXT: [[TMP84:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 0 +// CHECK15-NEXT: [[TMP85:%.*]] = load i32, i32* [[N]], align 4 +// CHECK15-NEXT: store i32 [[TMP85]], i32* [[DOTCAPTURE_EXPR_22]], align 4 +// CHECK15-NEXT: [[TMP86:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_22]], align 4 +// CHECK15-NEXT: [[SUB24:%.*]] = sub nsw i32 [[TMP86]], 0 +// CHECK15-NEXT: [[DIV25:%.*]] = sdiv i32 [[SUB24]], 1 +// CHECK15-NEXT: [[SUB26:%.*]] = sub nsw i32 [[DIV25]], 1 +// CHECK15-NEXT: store i32 [[SUB26]], i32* [[DOTCAPTURE_EXPR_23]], align 4 +// CHECK15-NEXT: [[TMP87:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_23]], align 4 +// CHECK15-NEXT: [[ADD27:%.*]] = add nsw i32 [[TMP87]], 1 
+// CHECK15-NEXT: [[TMP88:%.*]] = zext i32 [[ADD27]] to i64 +// CHECK15-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP88]]) +// CHECK15-NEXT: [[TMP89:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147.region_id, i32 4, i8** [[TMP83]], i8** [[TMP84]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK15-NEXT: [[TMP90:%.*]] = icmp ne i32 [[TMP89]], 0 +// CHECK15-NEXT: br i1 [[TMP90]], label [[OMP_OFFLOAD_FAILED28:%.*]], label [[OMP_OFFLOAD_CONT29:%.*]] +// CHECK15: omp_offload.failed28: +// CHECK15-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147(i32 [[TMP58]], i32 [[TMP0]], i32* [[VLA]], i32 [[TMP60]]) #[[ATTR3]] +// CHECK15-NEXT: br label [[OMP_OFFLOAD_CONT29]] +// CHECK15: omp_offload.cont29: +// CHECK15-NEXT: [[TMP91:%.*]] = load i32, i32* [[N]], align 4 +// CHECK15-NEXT: store i32 [[TMP91]], i32* [[N_CASTED30]], align 4 +// CHECK15-NEXT: [[TMP92:%.*]] = load i32, i32* [[N_CASTED30]], align 4 +// CHECK15-NEXT: [[TMP93:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK15-NEXT: [[TMP94:%.*]] = sext i32 [[TMP93]] to i64 +// CHECK15-NEXT: [[TMP95:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS31]], i32 0, i32 0 +// CHECK15-NEXT: [[TMP96:%.*]] = bitcast i8** [[TMP95]] to i32* +// CHECK15-NEXT: store i32 [[TMP92]], i32* [[TMP96]], align 4 +// CHECK15-NEXT: [[TMP97:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS32]], i32 0, i32 0 +// CHECK15-NEXT: [[TMP98:%.*]] = bitcast i8** [[TMP97]] to i32* +// CHECK15-NEXT: store i32 [[TMP92]], i32* [[TMP98]], align 4 +// CHECK15-NEXT: [[TMP99:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS33]], i32 0, i32 0 +// CHECK15-NEXT: store i8* null, i8** [[TMP99]], align 4 +// CHECK15-NEXT: [[TMP100:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS31]], i32 0, i32 1 +// CHECK15-NEXT: [[TMP101:%.*]] = bitcast i8** [[TMP100]] to i32* +// CHECK15-NEXT: store i32 [[TMP0]], i32* [[TMP101]], align 4 +// CHECK15-NEXT: [[TMP102:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS32]], i32 0, i32 1 +// CHECK15-NEXT: [[TMP103:%.*]] = bitcast i8** [[TMP102]] to i32* +// CHECK15-NEXT: store i32 [[TMP0]], i32* [[TMP103]], align 4 +// CHECK15-NEXT: [[TMP104:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS33]], i32 0, i32 1 +// CHECK15-NEXT: store i8* null, i8** [[TMP104]], align 4 +// CHECK15-NEXT: [[TMP105:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS31]], i32 0, i32 2 +// CHECK15-NEXT: [[TMP106:%.*]] = bitcast i8** [[TMP105]] to i32** +// CHECK15-NEXT: store i32* [[VLA]], i32** [[TMP106]], align 4 +// CHECK15-NEXT: [[TMP107:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS32]], i32 0, i32 2 +// CHECK15-NEXT: [[TMP108:%.*]] = bitcast i8** [[TMP107]] to i32** +// CHECK15-NEXT: store i32* [[VLA]], i32** [[TMP108]], align 4 +// CHECK15-NEXT: store i64 [[TMP94]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.12, i32 0, i32 2), align 4 +// CHECK15-NEXT: [[TMP109:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS33]], i32 0, i32 2 +// CHECK15-NEXT: store i8* null, i8** [[TMP109]], align 4 +// CHECK15-NEXT: [[TMP110:%.*]] = getelementptr 
inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS31]], i32 0, i32 0 +// CHECK15-NEXT: [[TMP111:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS32]], i32 0, i32 0 +// CHECK15-NEXT: [[TMP112:%.*]] = load i32, i32* [[N]], align 4 +// CHECK15-NEXT: store i32 [[TMP112]], i32* [[DOTCAPTURE_EXPR_35]], align 4 +// CHECK15-NEXT: [[TMP113:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_35]], align 4 +// CHECK15-NEXT: [[SUB37:%.*]] = sub nsw i32 [[TMP113]], 0 +// CHECK15-NEXT: [[DIV38:%.*]] = sdiv i32 [[SUB37]], 1 +// CHECK15-NEXT: [[SUB39:%.*]] = sub nsw i32 [[DIV38]], 1 +// CHECK15-NEXT: store i32 [[SUB39]], i32* [[DOTCAPTURE_EXPR_36]], align 4 +// CHECK15-NEXT: [[TMP114:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_36]], align 4 +// CHECK15-NEXT: [[ADD40:%.*]] = add nsw i32 [[TMP114]], 1 +// CHECK15-NEXT: [[TMP115:%.*]] = zext i32 [[ADD40]] to i64 +// CHECK15-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP115]]) +// CHECK15-NEXT: [[TMP116:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151.region_id, i32 3, i8** [[TMP110]], i8** [[TMP111]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK15-NEXT: [[TMP117:%.*]] = icmp ne i32 [[TMP116]], 0 +// CHECK15-NEXT: br i1 [[TMP117]], label [[OMP_OFFLOAD_FAILED41:%.*]], label [[OMP_OFFLOAD_CONT42:%.*]] +// CHECK15: omp_offload.failed41: +// CHECK15-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151(i32 [[TMP92]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] +// CHECK15-NEXT: br label [[OMP_OFFLOAD_CONT42]] +// CHECK15: omp_offload.cont42: +// CHECK15-NEXT: [[TMP118:%.*]] = load i32, i32* [[M]], align 4 +// CHECK15-NEXT: store i32 [[TMP118]], i32* [[DOTCAPTURE_EXPR_43]], align 4 +// CHECK15-NEXT: [[TMP119:%.*]] = load i32, i32* [[N]], align 4 +// CHECK15-NEXT: store i32 [[TMP119]], i32* [[N_CASTED44]], align 4 +// CHECK15-NEXT: [[TMP120:%.*]] = load i32, i32* [[N_CASTED44]], align 4 +// CHECK15-NEXT: [[TMP121:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_43]], align 4 +// CHECK15-NEXT: store i32 [[TMP121]], i32* [[DOTCAPTURE_EXPR__CASTED45]], align 4 +// CHECK15-NEXT: [[TMP122:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED45]], align 4 +// CHECK15-NEXT: [[TMP123:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK15-NEXT: [[TMP124:%.*]] = sext i32 [[TMP123]] to i64 +// CHECK15-NEXT: [[TMP125:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS46]], i32 0, i32 0 +// CHECK15-NEXT: [[TMP126:%.*]] = bitcast i8** [[TMP125]] to i32* +// CHECK15-NEXT: store i32 [[TMP120]], i32* [[TMP126]], align 4 +// CHECK15-NEXT: [[TMP127:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS47]], i32 0, i32 0 +// CHECK15-NEXT: [[TMP128:%.*]] = bitcast i8** [[TMP127]] to i32* +// CHECK15-NEXT: store i32 [[TMP120]], i32* [[TMP128]], align 4 +// CHECK15-NEXT: [[TMP129:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS48]], i32 0, i32 0 +// CHECK15-NEXT: store i8* null, i8** [[TMP129]], align 4 +// CHECK15-NEXT: [[TMP130:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS46]], i32 0, i32 1 +// CHECK15-NEXT: [[TMP131:%.*]] = bitcast i8** [[TMP130]] to i32* +// CHECK15-NEXT: store i32 [[TMP0]], i32* [[TMP131]], align 4 +// CHECK15-NEXT: [[TMP132:%.*]] = getelementptr inbounds [4 x i8*], [4 
x i8*]* [[DOTOFFLOAD_PTRS47]], i32 0, i32 1 +// CHECK15-NEXT: [[TMP133:%.*]] = bitcast i8** [[TMP132]] to i32* +// CHECK15-NEXT: store i32 [[TMP0]], i32* [[TMP133]], align 4 +// CHECK15-NEXT: [[TMP134:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS48]], i32 0, i32 1 +// CHECK15-NEXT: store i8* null, i8** [[TMP134]], align 4 +// CHECK15-NEXT: [[TMP135:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS46]], i32 0, i32 2 +// CHECK15-NEXT: [[TMP136:%.*]] = bitcast i8** [[TMP135]] to i32** +// CHECK15-NEXT: store i32* [[VLA]], i32** [[TMP136]], align 4 +// CHECK15-NEXT: [[TMP137:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS47]], i32 0, i32 2 +// CHECK15-NEXT: [[TMP138:%.*]] = bitcast i8** [[TMP137]] to i32** +// CHECK15-NEXT: store i32* [[VLA]], i32** [[TMP138]], align 4 +// CHECK15-NEXT: store i64 [[TMP124]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.16, i32 0, i32 2), align 4 +// CHECK15-NEXT: [[TMP139:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS48]], i32 0, i32 2 +// CHECK15-NEXT: store i8* null, i8** [[TMP139]], align 4 +// CHECK15-NEXT: [[TMP140:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS46]], i32 0, i32 3 +// CHECK15-NEXT: [[TMP141:%.*]] = bitcast i8** [[TMP140]] to i32* +// CHECK15-NEXT: store i32 [[TMP122]], i32* [[TMP141]], align 4 +// CHECK15-NEXT: [[TMP142:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS47]], i32 0, i32 3 // CHECK15-NEXT: [[TMP143:%.*]] = bitcast i8** [[TMP142]] to i32* -// CHECK15-NEXT: store i32 [[TMP137]], i32* [[TMP143]], align 4 -// CHECK15-NEXT: [[TMP144:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS50]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP145:%.*]] = bitcast i8** [[TMP144]] to i32* -// CHECK15-NEXT: store i32 [[TMP137]], i32* [[TMP145]], align 4 -// CHECK15-NEXT: [[TMP146:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES52]], i32 0, i32 0 -// CHECK15-NEXT: store i64 4, i64* [[TMP146]], align 4 -// CHECK15-NEXT: [[TMP147:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS51]], i32 0, i32 0 -// CHECK15-NEXT: store i8* null, i8** [[TMP147]], align 4 -// CHECK15-NEXT: [[TMP148:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS49]], i32 0, i32 1 -// CHECK15-NEXT: [[TMP149:%.*]] = bitcast i8** [[TMP148]] to i32* -// CHECK15-NEXT: store i32 [[TMP0]], i32* [[TMP149]], align 4 -// CHECK15-NEXT: [[TMP150:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS50]], i32 0, i32 1 -// CHECK15-NEXT: [[TMP151:%.*]] = bitcast i8** [[TMP150]] to i32* -// CHECK15-NEXT: store i32 [[TMP0]], i32* [[TMP151]], align 4 -// CHECK15-NEXT: [[TMP152:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES52]], i32 0, i32 1 -// CHECK15-NEXT: store i64 4, i64* [[TMP152]], align 4 -// CHECK15-NEXT: [[TMP153:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS51]], i32 0, i32 1 -// CHECK15-NEXT: store i8* null, i8** [[TMP153]], align 4 -// CHECK15-NEXT: [[TMP154:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS49]], i32 0, i32 2 -// CHECK15-NEXT: [[TMP155:%.*]] = bitcast i8** [[TMP154]] to i32** -// CHECK15-NEXT: store i32* [[VLA]], i32** [[TMP155]], align 4 -// CHECK15-NEXT: [[TMP156:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS50]], i32 0, i32 2 -// CHECK15-NEXT: [[TMP157:%.*]] = bitcast i8** [[TMP156]] to i32** -// 
CHECK15-NEXT: store i32* [[VLA]], i32** [[TMP157]], align 4 -// CHECK15-NEXT: [[TMP158:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES52]], i32 0, i32 2 -// CHECK15-NEXT: store i64 [[TMP141]], i64* [[TMP158]], align 4 -// CHECK15-NEXT: [[TMP159:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS51]], i32 0, i32 2 -// CHECK15-NEXT: store i8* null, i8** [[TMP159]], align 4 -// CHECK15-NEXT: [[TMP160:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS49]], i32 0, i32 3 -// CHECK15-NEXT: [[TMP161:%.*]] = bitcast i8** [[TMP160]] to i32* -// CHECK15-NEXT: store i32 [[TMP139]], i32* [[TMP161]], align 4 -// CHECK15-NEXT: [[TMP162:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS50]], i32 0, i32 3 -// CHECK15-NEXT: [[TMP163:%.*]] = bitcast i8** [[TMP162]] to i32* -// CHECK15-NEXT: store i32 [[TMP139]], i32* [[TMP163]], align 4 -// CHECK15-NEXT: [[TMP164:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES52]], i32 0, i32 3 -// CHECK15-NEXT: store i64 4, i64* [[TMP164]], align 4 -// CHECK15-NEXT: [[TMP165:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS51]], i32 0, i32 3 -// CHECK15-NEXT: store i8* null, i8** [[TMP165]], align 4 -// CHECK15-NEXT: [[TMP166:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS49]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP167:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS50]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP168:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES52]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP169:%.*]] = load i32, i32* [[N]], align 4 -// CHECK15-NEXT: store i32 [[TMP169]], i32* [[DOTCAPTURE_EXPR_54]], align 4 -// CHECK15-NEXT: [[TMP170:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_54]], align 4 -// CHECK15-NEXT: [[SUB56:%.*]] = sub nsw i32 [[TMP170]], 0 -// CHECK15-NEXT: [[DIV57:%.*]] = sdiv i32 [[SUB56]], 1 -// CHECK15-NEXT: [[SUB58:%.*]] = sub nsw i32 [[DIV57]], 1 -// CHECK15-NEXT: store i32 [[SUB58]], i32* [[DOTCAPTURE_EXPR_55]], align 4 -// CHECK15-NEXT: [[TMP171:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_55]], align 4 -// CHECK15-NEXT: [[ADD59:%.*]] = add nsw i32 [[TMP171]], 1 -// CHECK15-NEXT: [[TMP172:%.*]] = zext i32 [[ADD59]] to i64 -// CHECK15-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP172]]) -// CHECK15-NEXT: [[TMP173:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155.region_id, i32 4, i8** [[TMP166]], i8** [[TMP167]], i64* [[TMP168]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK15-NEXT: [[TMP174:%.*]] = icmp ne i32 [[TMP173]], 0 -// CHECK15-NEXT: br i1 [[TMP174]], label [[OMP_OFFLOAD_FAILED60:%.*]], label [[OMP_OFFLOAD_CONT61:%.*]] -// CHECK15: omp_offload.failed60: -// CHECK15-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155(i32 [[TMP137]], i32 [[TMP0]], i32* [[VLA]], i32 [[TMP139]]) #[[ATTR3]] -// CHECK15-NEXT: br label [[OMP_OFFLOAD_CONT61]] -// CHECK15: omp_offload.cont61: -// CHECK15-NEXT: [[TMP175:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 -// CHECK15-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP175]]) +// CHECK15-NEXT: store i32 [[TMP122]], i32* [[TMP143]], align 4 +// CHECK15-NEXT: [[TMP144:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS48]], 
i32 0, i32 3 +// CHECK15-NEXT: store i8* null, i8** [[TMP144]], align 4 +// CHECK15-NEXT: [[TMP145:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS46]], i32 0, i32 0 +// CHECK15-NEXT: [[TMP146:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS47]], i32 0, i32 0 +// CHECK15-NEXT: [[TMP147:%.*]] = load i32, i32* [[N]], align 4 +// CHECK15-NEXT: store i32 [[TMP147]], i32* [[DOTCAPTURE_EXPR_50]], align 4 +// CHECK15-NEXT: [[TMP148:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_50]], align 4 +// CHECK15-NEXT: [[SUB52:%.*]] = sub nsw i32 [[TMP148]], 0 +// CHECK15-NEXT: [[DIV53:%.*]] = sdiv i32 [[SUB52]], 1 +// CHECK15-NEXT: [[SUB54:%.*]] = sub nsw i32 [[DIV53]], 1 +// CHECK15-NEXT: store i32 [[SUB54]], i32* [[DOTCAPTURE_EXPR_51]], align 4 +// CHECK15-NEXT: [[TMP149:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_51]], align 4 +// CHECK15-NEXT: [[ADD55:%.*]] = add nsw i32 [[TMP149]], 1 +// CHECK15-NEXT: [[TMP150:%.*]] = zext i32 [[ADD55]] to i64 +// CHECK15-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP150]]) +// CHECK15-NEXT: [[TMP151:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155.region_id, i32 4, i8** [[TMP145]], i8** [[TMP146]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.16, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK15-NEXT: [[TMP152:%.*]] = icmp ne i32 [[TMP151]], 0 +// CHECK15-NEXT: br i1 [[TMP152]], label [[OMP_OFFLOAD_FAILED56:%.*]], label [[OMP_OFFLOAD_CONT57:%.*]] +// CHECK15: omp_offload.failed56: +// CHECK15-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155(i32 [[TMP120]], i32 [[TMP0]], i32* [[VLA]], i32 [[TMP122]]) #[[ATTR3]] +// CHECK15-NEXT: br label [[OMP_OFFLOAD_CONT57]] +// CHECK15: omp_offload.cont57: +// CHECK15-NEXT: [[TMP153:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 +// CHECK15-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP153]]) // CHECK15-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK15-NEXT: [[TMP176:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK15-NEXT: call void @llvm.stackrestore(i8* [[TMP176]]) -// CHECK15-NEXT: [[TMP177:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK15-NEXT: ret i32 [[TMP177]] +// CHECK15-NEXT: [[TMP154:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK15-NEXT: call void @llvm.stackrestore(i8* [[TMP154]]) +// CHECK15-NEXT: [[TMP155:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK15-NEXT: ret i32 [[TMP155]] // // // CHECK15-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139 @@ -13264,11 +13147,11 @@ // CHECK15-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK15-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK15-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP5]]) +// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP5]]) // CHECK15-NEXT: ret void // // -// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..5 +// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..6 // CHECK15-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK15-NEXT: entry: // CHECK15-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -13349,7 +13232,7 @@ // CHECK15-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK15-NEXT: store i32 [[TMP20]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK15-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*, i32)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32 [[TMP16]], i32 [[TMP17]], i32 [[TMP19]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP21]]) +// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*, i32)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i32 [[TMP16]], i32 [[TMP17]], i32 [[TMP19]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP21]]) // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK15: omp.inner.for.inc: // CHECK15-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -13391,7 +13274,7 @@ // CHECK15-NEXT: ret void // // -// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..6 +// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..7 // CHECK15-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32 noundef [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK15-NEXT: entry: // CHECK15-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -13509,11 +13392,11 @@ // CHECK15-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4 // CHECK15-NEXT: store i32 [[TMP2]], i32* [[N_CASTED]], align 4 // CHECK15-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4 -// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*)* @.omp_outlined..8 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]]) +// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]]) // CHECK15-NEXT: ret void // // -// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..8 +// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..10 // CHECK15-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] { // CHECK15-NEXT: entry: // CHECK15-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -13586,7 +13469,7 @@ // CHECK15-NEXT: [[TMP17:%.*]] = load i32, i32* [[N_ADDR]], align 4 // CHECK15-NEXT: store i32 [[TMP17]], i32* [[N_CASTED]], align 4 // CHECK15-NEXT: [[TMP18:%.*]] = load i32, i32* [[N_CASTED]], align 4 -// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), i32 [[TMP15]], i32 [[TMP16]], i32 [[TMP18]], i32 [[TMP0]], i32* [[TMP1]]) +// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP15]], i32 [[TMP16]], i32 [[TMP18]], i32 [[TMP0]], i32* [[TMP1]]) // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK15: omp.inner.for.inc: // CHECK15-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -13605,7 +13488,7 @@ // CHECK15-NEXT: ret void // // -// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..9 +// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..11 // CHECK15-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32 noundef [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] { // CHECK15-NEXT: entry: // CHECK15-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -13723,11 +13606,11 @@ // CHECK15-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK15-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK15-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP5]]) +// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP5]]) // CHECK15-NEXT: ret void // // -// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..14 // CHECK15-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK15-NEXT: entry: // CHECK15-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -13806,7 +13689,7 @@ // CHECK15-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK15-NEXT: store i32 [[TMP19]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK15-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*, i32)* @.omp_outlined..12 to void (i32*, i32*, ...)*), i32 [[TMP15]], i32 [[TMP16]], i32 [[TMP18]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP20]]) +// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*, i32)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i32 [[TMP15]], i32 [[TMP16]], i32 [[TMP18]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP20]]) // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK15: omp.inner.for.inc: // CHECK15-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -13825,7 +13708,7 @@ // CHECK15-NEXT: ret void // // -// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..12 +// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..15 // CHECK15-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32 noundef [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK15-NEXT: entry: // CHECK15-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -13968,7 +13851,7 @@ // CHECK15-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK15-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK15-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK15-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l112.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK15-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l112.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.20, 
i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.21, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK15-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK15-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK15: omp_offload.failed: @@ -13986,7 +13869,7 @@ // CHECK15-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0 // CHECK15-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0 // CHECK15-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK15-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.19, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.20, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK15-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.24, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.25, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK15-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 // CHECK15-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]] // CHECK15: omp_offload.failed5: @@ -14017,7 +13900,7 @@ // CHECK15-NEXT: [[TMP31:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0 // CHECK15-NEXT: [[TMP32:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0 // CHECK15-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK15-NEXT: [[TMP33:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l120.region_id, i32 2, i8** [[TMP31]], i8** [[TMP32]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.23, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.24, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK15-NEXT: [[TMP33:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l120.region_id, i32 2, i8** [[TMP31]], i8** [[TMP32]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.28, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.29, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK15-NEXT: [[TMP34:%.*]] = icmp ne i32 [[TMP33]], 0 // CHECK15-NEXT: br i1 [[TMP34]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]] // CHECK15: omp_offload.failed11: @@ -14035,7 +13918,7 @@ // CHECK15-NEXT: [[TMP40:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 0 // CHECK15-NEXT: [[TMP41:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 0 // CHECK15-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 
-1, i64 10) -// CHECK15-NEXT: [[TMP42:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l124.region_id, i32 1, i8** [[TMP40]], i8** [[TMP41]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.27, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.28, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK15-NEXT: [[TMP42:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l124.region_id, i32 1, i8** [[TMP40]], i8** [[TMP41]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.32, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.33, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK15-NEXT: [[TMP43:%.*]] = icmp ne i32 [[TMP42]], 0 // CHECK15-NEXT: br i1 [[TMP43]], label [[OMP_OFFLOAD_FAILED17:%.*]], label [[OMP_OFFLOAD_CONT18:%.*]] // CHECK15: omp_offload.failed17: @@ -14066,7 +13949,7 @@ // CHECK15-NEXT: [[TMP57:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0 // CHECK15-NEXT: [[TMP58:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0 // CHECK15-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK15-NEXT: [[TMP59:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l128.region_id, i32 2, i8** [[TMP57]], i8** [[TMP58]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.31, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.32, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK15-NEXT: [[TMP59:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l128.region_id, i32 2, i8** [[TMP57]], i8** [[TMP58]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.36, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.37, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK15-NEXT: [[TMP60:%.*]] = icmp ne i32 [[TMP59]], 0 // CHECK15-NEXT: br i1 [[TMP60]], label [[OMP_OFFLOAD_FAILED25:%.*]], label [[OMP_OFFLOAD_CONT26:%.*]] // CHECK15: omp_offload.failed25: @@ -14082,11 +13965,11 @@ // CHECK15-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK15-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 // CHECK15-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 -// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK15-NEXT: ret void // // -// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..14 +// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..18 // CHECK15-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK15-NEXT: entry: // CHECK15-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -14132,7 +14015,7 @@ // CHECK15: omp.inner.for.body: // CHECK15-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 // CHECK15-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 -// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]) +// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..19 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]) // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK15: omp.inner.for.inc: // CHECK15-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -14147,7 +14030,7 @@ // CHECK15-NEXT: ret void // // -// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..15 +// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..19 // CHECK15-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK15-NEXT: entry: // CHECK15-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -14227,11 +14110,11 @@ // CHECK15-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK15-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 // CHECK15-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 -// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..17 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..22 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK15-NEXT: ret void // // -// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..17 +// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..22 // CHECK15-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK15-NEXT: entry: // CHECK15-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -14277,7 +14160,7 @@ // CHECK15: omp.inner.for.body: // CHECK15-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 // CHECK15-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 -// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]) +// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..23 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]) // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK15: omp.inner.for.inc: // CHECK15-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -14292,7 +14175,7 @@ // CHECK15-NEXT: ret void // // -// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..18 +// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..23 // CHECK15-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK15-NEXT: entry: // CHECK15-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -14378,11 +14261,11 @@ // CHECK15-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK15-NEXT: store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK15-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..21 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP2]]) +// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..26 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP2]]) // CHECK15-NEXT: ret void // // -// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..21 +// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..26 // CHECK15-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK15-NEXT: entry: // CHECK15-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -14434,7 +14317,7 @@ // CHECK15-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK15-NEXT: store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK15-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..22 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]]) +// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..27 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]]) // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK15: omp.inner.for.inc: // CHECK15-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -14449,7 +14332,7 @@ // CHECK15-NEXT: ret void // // -// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..22 +// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..27 // CHECK15-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK15-NEXT: entry: // CHECK15-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -14551,11 +14434,11 @@ // CHECK15-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK15-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 // CHECK15-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 -// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..25 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..30 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK15-NEXT: ret void // // -// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..25 +// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..30 // CHECK15-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK15-NEXT: entry: // CHECK15-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -14601,7 +14484,7 @@ // CHECK15: omp.inner.for.body: // CHECK15-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 // CHECK15-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 -// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..26 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]) +// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..31 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]) // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK15: omp.inner.for.inc: // CHECK15-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -14616,7 +14499,7 @@ // CHECK15-NEXT: ret void // // -// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..26 +// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..31 // CHECK15-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK15-NEXT: entry: // CHECK15-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -14700,11 +14583,11 @@ // CHECK15-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK15-NEXT: store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK15-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..29 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP2]]) +// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..34 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP2]]) // CHECK15-NEXT: ret void // // -// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..29 +// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..34 // CHECK15-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK15-NEXT: entry: // CHECK15-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -14756,7 +14639,7 @@ // CHECK15-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK15-NEXT: store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK15-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..30 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]]) +// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..35 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]]) // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK15: omp.inner.for.inc: // CHECK15-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -14771,7 +14654,7 @@ // CHECK15-NEXT: ret void // // -// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..30 +// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..35 // CHECK15-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK15-NEXT: entry: // CHECK15-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -14867,7 +14750,6 @@ // CHECK16-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK16-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK16-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4 -// CHECK16-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 4 // CHECK16-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK16-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK16-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -14875,38 +14757,34 @@ // CHECK16-NEXT: [[DOTOFFLOAD_BASEPTRS4:%.*]] = alloca [3 x i8*], align 4 // CHECK16-NEXT: [[DOTOFFLOAD_PTRS5:%.*]] = alloca [3 x i8*], align 4 // CHECK16-NEXT: [[DOTOFFLOAD_MAPPERS6:%.*]] = alloca [3 x i8*], align 4 -// CHECK16-NEXT: [[DOTOFFLOAD_SIZES7:%.*]] = alloca [3 x i64], align 4 -// CHECK16-NEXT: [[_TMP8:%.*]] = alloca i32, align 4 +// CHECK16-NEXT: [[_TMP7:%.*]] = alloca i32, align 4 +// CHECK16-NEXT: [[DOTCAPTURE_EXPR_8:%.*]] = alloca i32, align 4 // CHECK16-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4 -// CHECK16-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4 -// CHECK16-NEXT: [[DOTCAPTURE_EXPR_17:%.*]] = alloca i32, align 4 -// CHECK16-NEXT: [[N_CASTED18:%.*]] = alloca i32, align 4 +// 
CHECK16-NEXT: [[DOTCAPTURE_EXPR_16:%.*]] = alloca i32, align 4 +// CHECK16-NEXT: [[N_CASTED17:%.*]] = alloca i32, align 4 // CHECK16-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4 -// CHECK16-NEXT: [[DOTOFFLOAD_BASEPTRS19:%.*]] = alloca [4 x i8*], align 4 -// CHECK16-NEXT: [[DOTOFFLOAD_PTRS20:%.*]] = alloca [4 x i8*], align 4 -// CHECK16-NEXT: [[DOTOFFLOAD_MAPPERS21:%.*]] = alloca [4 x i8*], align 4 -// CHECK16-NEXT: [[DOTOFFLOAD_SIZES22:%.*]] = alloca [4 x i64], align 4 -// CHECK16-NEXT: [[_TMP23:%.*]] = alloca i32, align 4 -// CHECK16-NEXT: [[DOTCAPTURE_EXPR_24:%.*]] = alloca i32, align 4 -// CHECK16-NEXT: [[DOTCAPTURE_EXPR_25:%.*]] = alloca i32, align 4 -// CHECK16-NEXT: [[N_CASTED32:%.*]] = alloca i32, align 4 -// CHECK16-NEXT: [[DOTOFFLOAD_BASEPTRS33:%.*]] = alloca [3 x i8*], align 4 -// CHECK16-NEXT: [[DOTOFFLOAD_PTRS34:%.*]] = alloca [3 x i8*], align 4 -// CHECK16-NEXT: [[DOTOFFLOAD_MAPPERS35:%.*]] = alloca [3 x i8*], align 4 -// CHECK16-NEXT: [[DOTOFFLOAD_SIZES36:%.*]] = alloca [3 x i64], align 4 -// CHECK16-NEXT: [[_TMP37:%.*]] = alloca i32, align 4 -// CHECK16-NEXT: [[DOTCAPTURE_EXPR_38:%.*]] = alloca i32, align 4 -// CHECK16-NEXT: [[DOTCAPTURE_EXPR_39:%.*]] = alloca i32, align 4 -// CHECK16-NEXT: [[DOTCAPTURE_EXPR_46:%.*]] = alloca i32, align 4 -// CHECK16-NEXT: [[N_CASTED47:%.*]] = alloca i32, align 4 -// CHECK16-NEXT: [[DOTCAPTURE_EXPR__CASTED48:%.*]] = alloca i32, align 4 -// CHECK16-NEXT: [[DOTOFFLOAD_BASEPTRS49:%.*]] = alloca [4 x i8*], align 4 -// CHECK16-NEXT: [[DOTOFFLOAD_PTRS50:%.*]] = alloca [4 x i8*], align 4 -// CHECK16-NEXT: [[DOTOFFLOAD_MAPPERS51:%.*]] = alloca [4 x i8*], align 4 -// CHECK16-NEXT: [[DOTOFFLOAD_SIZES52:%.*]] = alloca [4 x i64], align 4 -// CHECK16-NEXT: [[_TMP53:%.*]] = alloca i32, align 4 -// CHECK16-NEXT: [[DOTCAPTURE_EXPR_54:%.*]] = alloca i32, align 4 -// CHECK16-NEXT: [[DOTCAPTURE_EXPR_55:%.*]] = alloca i32, align 4 +// CHECK16-NEXT: [[DOTOFFLOAD_BASEPTRS18:%.*]] = alloca [4 x i8*], align 4 +// CHECK16-NEXT: [[DOTOFFLOAD_PTRS19:%.*]] = alloca [4 x i8*], align 4 +// CHECK16-NEXT: [[DOTOFFLOAD_MAPPERS20:%.*]] = alloca [4 x i8*], align 4 +// CHECK16-NEXT: [[_TMP21:%.*]] = alloca i32, align 4 +// CHECK16-NEXT: [[DOTCAPTURE_EXPR_22:%.*]] = alloca i32, align 4 +// CHECK16-NEXT: [[DOTCAPTURE_EXPR_23:%.*]] = alloca i32, align 4 +// CHECK16-NEXT: [[N_CASTED30:%.*]] = alloca i32, align 4 +// CHECK16-NEXT: [[DOTOFFLOAD_BASEPTRS31:%.*]] = alloca [3 x i8*], align 4 +// CHECK16-NEXT: [[DOTOFFLOAD_PTRS32:%.*]] = alloca [3 x i8*], align 4 +// CHECK16-NEXT: [[DOTOFFLOAD_MAPPERS33:%.*]] = alloca [3 x i8*], align 4 +// CHECK16-NEXT: [[_TMP34:%.*]] = alloca i32, align 4 +// CHECK16-NEXT: [[DOTCAPTURE_EXPR_35:%.*]] = alloca i32, align 4 +// CHECK16-NEXT: [[DOTCAPTURE_EXPR_36:%.*]] = alloca i32, align 4 +// CHECK16-NEXT: [[DOTCAPTURE_EXPR_43:%.*]] = alloca i32, align 4 +// CHECK16-NEXT: [[N_CASTED44:%.*]] = alloca i32, align 4 +// CHECK16-NEXT: [[DOTCAPTURE_EXPR__CASTED45:%.*]] = alloca i32, align 4 +// CHECK16-NEXT: [[DOTOFFLOAD_BASEPTRS46:%.*]] = alloca [4 x i8*], align 4 +// CHECK16-NEXT: [[DOTOFFLOAD_PTRS47:%.*]] = alloca [4 x i8*], align 4 +// CHECK16-NEXT: [[DOTOFFLOAD_MAPPERS48:%.*]] = alloca [4 x i8*], align 4 +// CHECK16-NEXT: [[_TMP49:%.*]] = alloca i32, align 4 +// CHECK16-NEXT: [[DOTCAPTURE_EXPR_50:%.*]] = alloca i32, align 4 +// CHECK16-NEXT: [[DOTCAPTURE_EXPR_51:%.*]] = alloca i32, align 4 // CHECK16-NEXT: store i32 0, i32* [[RETVAL]], align 4 // CHECK16-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 // CHECK16-NEXT: store i8** 
[[ARGV]], i8*** [[ARGV_ADDR]], align 4 @@ -14928,312 +14806,278 @@ // CHECK16-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK16-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i32* // CHECK16-NEXT: store i32 [[TMP3]], i32* [[TMP9]], align 4 -// CHECK16-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK16-NEXT: store i64 4, i64* [[TMP10]], align 4 -// CHECK16-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK16-NEXT: store i8* null, i8** [[TMP11]], align 4 -// CHECK16-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK16-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32* -// CHECK16-NEXT: store i32 [[TMP0]], i32* [[TMP13]], align 4 -// CHECK16-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK16-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32* -// CHECK16-NEXT: store i32 [[TMP0]], i32* [[TMP15]], align 4 -// CHECK16-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK16-NEXT: store i64 4, i64* [[TMP16]], align 4 -// CHECK16-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK16-NEXT: store i8* null, i8** [[TMP17]], align 4 -// CHECK16-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK16-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK16-NEXT: store i8* null, i8** [[TMP10]], align 4 +// CHECK16-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK16-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i32* +// CHECK16-NEXT: store i32 [[TMP0]], i32* [[TMP12]], align 4 +// CHECK16-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK16-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i32* +// CHECK16-NEXT: store i32 [[TMP0]], i32* [[TMP14]], align 4 +// CHECK16-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK16-NEXT: store i8* null, i8** [[TMP15]], align 4 +// CHECK16-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK16-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** +// CHECK16-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 4 +// CHECK16-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK16-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** // CHECK16-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 4 -// CHECK16-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK16-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** -// CHECK16-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 4 -// CHECK16-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK16-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 4 -// CHECK16-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK16-NEXT: store i8* null, i8** [[TMP23]], align 4 -// CHECK16-NEXT: [[TMP24:%.*]] = getelementptr inbounds 
[3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK16-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK16-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK16-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4 -// CHECK16-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK16-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK16-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 +// CHECK16-NEXT: store i64 [[TMP5]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 2), align 4 +// CHECK16-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK16-NEXT: store i8* null, i8** [[TMP20]], align 4 +// CHECK16-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK16-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK16-NEXT: [[TMP23:%.*]] = load i32, i32* [[N]], align 4 +// CHECK16-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK16-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK16-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK16-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK16-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK16-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK16-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK16-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1 -// CHECK16-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 -// CHECK16-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP30]]) -// CHECK16-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK16-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 -// CHECK16-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK16-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK16-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], 1 +// CHECK16-NEXT: [[TMP26:%.*]] = zext i32 [[ADD]] to i64 +// CHECK16-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP26]]) +// CHECK16-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK16-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK16-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK16: omp_offload.failed: // CHECK16-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139(i32 [[TMP3]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK16-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK16: omp_offload.cont: -// CHECK16-NEXT: [[TMP33:%.*]] 
= load i32, i32* [[N]], align 4 -// CHECK16-NEXT: store i32 [[TMP33]], i32* [[N_CASTED3]], align 4 -// CHECK16-NEXT: [[TMP34:%.*]] = load i32, i32* [[N_CASTED3]], align 4 -// CHECK16-NEXT: [[TMP35:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK16-NEXT: [[TMP36:%.*]] = sext i32 [[TMP35]] to i64 -// CHECK16-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 -// CHECK16-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i32* -// CHECK16-NEXT: store i32 [[TMP34]], i32* [[TMP38]], align 4 -// CHECK16-NEXT: [[TMP39:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 -// CHECK16-NEXT: [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i32* -// CHECK16-NEXT: store i32 [[TMP34]], i32* [[TMP40]], align 4 -// CHECK16-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 0 -// CHECK16-NEXT: store i64 4, i64* [[TMP41]], align 4 -// CHECK16-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0 +// CHECK16-NEXT: [[TMP29:%.*]] = load i32, i32* [[N]], align 4 +// CHECK16-NEXT: store i32 [[TMP29]], i32* [[N_CASTED3]], align 4 +// CHECK16-NEXT: [[TMP30:%.*]] = load i32, i32* [[N_CASTED3]], align 4 +// CHECK16-NEXT: [[TMP31:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK16-NEXT: [[TMP32:%.*]] = sext i32 [[TMP31]] to i64 +// CHECK16-NEXT: [[TMP33:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 +// CHECK16-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i32* +// CHECK16-NEXT: store i32 [[TMP30]], i32* [[TMP34]], align 4 +// CHECK16-NEXT: [[TMP35:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 +// CHECK16-NEXT: [[TMP36:%.*]] = bitcast i8** [[TMP35]] to i32* +// CHECK16-NEXT: store i32 [[TMP30]], i32* [[TMP36]], align 4 +// CHECK16-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0 +// CHECK16-NEXT: store i8* null, i8** [[TMP37]], align 4 +// CHECK16-NEXT: [[TMP38:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1 +// CHECK16-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i32* +// CHECK16-NEXT: store i32 [[TMP0]], i32* [[TMP39]], align 4 +// CHECK16-NEXT: [[TMP40:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 1 +// CHECK16-NEXT: [[TMP41:%.*]] = bitcast i8** [[TMP40]] to i32* +// CHECK16-NEXT: store i32 [[TMP0]], i32* [[TMP41]], align 4 +// CHECK16-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1 // CHECK16-NEXT: store i8* null, i8** [[TMP42]], align 4 -// CHECK16-NEXT: [[TMP43:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1 -// CHECK16-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32* -// CHECK16-NEXT: store i32 [[TMP0]], i32* [[TMP44]], align 4 -// CHECK16-NEXT: [[TMP45:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 1 -// CHECK16-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32* -// CHECK16-NEXT: store i32 [[TMP0]], i32* [[TMP46]], align 4 -// CHECK16-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 1 -// CHECK16-NEXT: store i64 4, i64* [[TMP47]], align 4 -// CHECK16-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1 -// CHECK16-NEXT: store i8* null, i8** [[TMP48]], align 4 -// 
CHECK16-NEXT: [[TMP49:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2 -// CHECK16-NEXT: [[TMP50:%.*]] = bitcast i8** [[TMP49]] to i32** -// CHECK16-NEXT: store i32* [[VLA]], i32** [[TMP50]], align 4 -// CHECK16-NEXT: [[TMP51:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 2 -// CHECK16-NEXT: [[TMP52:%.*]] = bitcast i8** [[TMP51]] to i32** -// CHECK16-NEXT: store i32* [[VLA]], i32** [[TMP52]], align 4 -// CHECK16-NEXT: [[TMP53:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 2 -// CHECK16-NEXT: store i64 [[TMP36]], i64* [[TMP53]], align 4 -// CHECK16-NEXT: [[TMP54:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 2 -// CHECK16-NEXT: store i8* null, i8** [[TMP54]], align 4 -// CHECK16-NEXT: [[TMP55:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 -// CHECK16-NEXT: [[TMP56:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 -// CHECK16-NEXT: [[TMP57:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 0 -// CHECK16-NEXT: [[TMP58:%.*]] = load i32, i32* [[N]], align 4 -// CHECK16-NEXT: store i32 [[TMP58]], i32* [[DOTCAPTURE_EXPR_9]], align 4 -// CHECK16-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 -// CHECK16-NEXT: [[SUB11:%.*]] = sub nsw i32 [[TMP59]], 0 -// CHECK16-NEXT: [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1 -// CHECK16-NEXT: [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1 -// CHECK16-NEXT: store i32 [[SUB13]], i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK16-NEXT: [[TMP60:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK16-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP60]], 1 -// CHECK16-NEXT: [[TMP61:%.*]] = zext i32 [[ADD14]] to i64 -// CHECK16-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP61]]) -// CHECK16-NEXT: [[TMP62:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143.region_id, i32 3, i8** [[TMP55]], i8** [[TMP56]], i64* [[TMP57]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK16-NEXT: [[TMP63:%.*]] = icmp ne i32 [[TMP62]], 0 -// CHECK16-NEXT: br i1 [[TMP63]], label [[OMP_OFFLOAD_FAILED15:%.*]], label [[OMP_OFFLOAD_CONT16:%.*]] -// CHECK16: omp_offload.failed15: -// CHECK16-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143(i32 [[TMP34]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] -// CHECK16-NEXT: br label [[OMP_OFFLOAD_CONT16]] -// CHECK16: omp_offload.cont16: -// CHECK16-NEXT: [[TMP64:%.*]] = load i32, i32* [[M]], align 4 -// CHECK16-NEXT: store i32 [[TMP64]], i32* [[DOTCAPTURE_EXPR_17]], align 4 -// CHECK16-NEXT: [[TMP65:%.*]] = load i32, i32* [[N]], align 4 -// CHECK16-NEXT: store i32 [[TMP65]], i32* [[N_CASTED18]], align 4 -// CHECK16-NEXT: [[TMP66:%.*]] = load i32, i32* [[N_CASTED18]], align 4 -// CHECK16-NEXT: [[TMP67:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_17]], align 4 -// CHECK16-NEXT: store i32 [[TMP67]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK16-NEXT: [[TMP68:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK16-NEXT: [[TMP69:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK16-NEXT: [[TMP70:%.*]] = sext i32 [[TMP69]] to i64 -// CHECK16-NEXT: [[TMP71:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* 
[[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 0 -// CHECK16-NEXT: [[TMP72:%.*]] = bitcast i8** [[TMP71]] to i32* -// CHECK16-NEXT: store i32 [[TMP66]], i32* [[TMP72]], align 4 -// CHECK16-NEXT: [[TMP73:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 0 -// CHECK16-NEXT: [[TMP74:%.*]] = bitcast i8** [[TMP73]] to i32* -// CHECK16-NEXT: store i32 [[TMP66]], i32* [[TMP74]], align 4 -// CHECK16-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 0 -// CHECK16-NEXT: store i64 4, i64* [[TMP75]], align 4 -// CHECK16-NEXT: [[TMP76:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 0 -// CHECK16-NEXT: store i8* null, i8** [[TMP76]], align 4 -// CHECK16-NEXT: [[TMP77:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 1 -// CHECK16-NEXT: [[TMP78:%.*]] = bitcast i8** [[TMP77]] to i32* -// CHECK16-NEXT: store i32 [[TMP0]], i32* [[TMP78]], align 4 -// CHECK16-NEXT: [[TMP79:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 1 -// CHECK16-NEXT: [[TMP80:%.*]] = bitcast i8** [[TMP79]] to i32* -// CHECK16-NEXT: store i32 [[TMP0]], i32* [[TMP80]], align 4 -// CHECK16-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 1 -// CHECK16-NEXT: store i64 4, i64* [[TMP81]], align 4 -// CHECK16-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 1 +// CHECK16-NEXT: [[TMP43:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2 +// CHECK16-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32** +// CHECK16-NEXT: store i32* [[VLA]], i32** [[TMP44]], align 4 +// CHECK16-NEXT: [[TMP45:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 2 +// CHECK16-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32** +// CHECK16-NEXT: store i32* [[VLA]], i32** [[TMP46]], align 4 +// CHECK16-NEXT: store i64 [[TMP32]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 2), align 4 +// CHECK16-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 2 +// CHECK16-NEXT: store i8* null, i8** [[TMP47]], align 4 +// CHECK16-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 +// CHECK16-NEXT: [[TMP49:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 +// CHECK16-NEXT: [[TMP50:%.*]] = load i32, i32* [[N]], align 4 +// CHECK16-NEXT: store i32 [[TMP50]], i32* [[DOTCAPTURE_EXPR_8]], align 4 +// CHECK16-NEXT: [[TMP51:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_8]], align 4 +// CHECK16-NEXT: [[SUB10:%.*]] = sub nsw i32 [[TMP51]], 0 +// CHECK16-NEXT: [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1 +// CHECK16-NEXT: [[SUB12:%.*]] = sub nsw i32 [[DIV11]], 1 +// CHECK16-NEXT: store i32 [[SUB12]], i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK16-NEXT: [[TMP52:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK16-NEXT: [[ADD13:%.*]] = add nsw i32 [[TMP52]], 1 +// CHECK16-NEXT: [[TMP53:%.*]] = zext i32 [[ADD13]] to i64 +// CHECK16-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP53]]) +// CHECK16-NEXT: [[TMP54:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143.region_id, i32 3, i8** [[TMP48]], i8** 
[[TMP49]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK16-NEXT: [[TMP55:%.*]] = icmp ne i32 [[TMP54]], 0 +// CHECK16-NEXT: br i1 [[TMP55]], label [[OMP_OFFLOAD_FAILED14:%.*]], label [[OMP_OFFLOAD_CONT15:%.*]] +// CHECK16: omp_offload.failed14: +// CHECK16-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143(i32 [[TMP30]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] +// CHECK16-NEXT: br label [[OMP_OFFLOAD_CONT15]] +// CHECK16: omp_offload.cont15: +// CHECK16-NEXT: [[TMP56:%.*]] = load i32, i32* [[M]], align 4 +// CHECK16-NEXT: store i32 [[TMP56]], i32* [[DOTCAPTURE_EXPR_16]], align 4 +// CHECK16-NEXT: [[TMP57:%.*]] = load i32, i32* [[N]], align 4 +// CHECK16-NEXT: store i32 [[TMP57]], i32* [[N_CASTED17]], align 4 +// CHECK16-NEXT: [[TMP58:%.*]] = load i32, i32* [[N_CASTED17]], align 4 +// CHECK16-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_16]], align 4 +// CHECK16-NEXT: store i32 [[TMP59]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 +// CHECK16-NEXT: [[TMP60:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 +// CHECK16-NEXT: [[TMP61:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK16-NEXT: [[TMP62:%.*]] = sext i32 [[TMP61]] to i64 +// CHECK16-NEXT: [[TMP63:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 0 +// CHECK16-NEXT: [[TMP64:%.*]] = bitcast i8** [[TMP63]] to i32* +// CHECK16-NEXT: store i32 [[TMP58]], i32* [[TMP64]], align 4 +// CHECK16-NEXT: [[TMP65:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 0 +// CHECK16-NEXT: [[TMP66:%.*]] = bitcast i8** [[TMP65]] to i32* +// CHECK16-NEXT: store i32 [[TMP58]], i32* [[TMP66]], align 4 +// CHECK16-NEXT: [[TMP67:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 0 +// CHECK16-NEXT: store i8* null, i8** [[TMP67]], align 4 +// CHECK16-NEXT: [[TMP68:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 1 +// CHECK16-NEXT: [[TMP69:%.*]] = bitcast i8** [[TMP68]] to i32* +// CHECK16-NEXT: store i32 [[TMP0]], i32* [[TMP69]], align 4 +// CHECK16-NEXT: [[TMP70:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 1 +// CHECK16-NEXT: [[TMP71:%.*]] = bitcast i8** [[TMP70]] to i32* +// CHECK16-NEXT: store i32 [[TMP0]], i32* [[TMP71]], align 4 +// CHECK16-NEXT: [[TMP72:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 1 +// CHECK16-NEXT: store i8* null, i8** [[TMP72]], align 4 +// CHECK16-NEXT: [[TMP73:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 2 +// CHECK16-NEXT: [[TMP74:%.*]] = bitcast i8** [[TMP73]] to i32** +// CHECK16-NEXT: store i32* [[VLA]], i32** [[TMP74]], align 4 +// CHECK16-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 2 +// CHECK16-NEXT: [[TMP76:%.*]] = bitcast i8** [[TMP75]] to i32** +// CHECK16-NEXT: store i32* [[VLA]], i32** [[TMP76]], align 4 +// CHECK16-NEXT: store i64 [[TMP62]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 2), align 4 +// CHECK16-NEXT: [[TMP77:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 2 +// CHECK16-NEXT: store i8* null, i8** [[TMP77]], align 4 +// CHECK16-NEXT: [[TMP78:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* 
[[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 3 +// CHECK16-NEXT: [[TMP79:%.*]] = bitcast i8** [[TMP78]] to i32* +// CHECK16-NEXT: store i32 [[TMP60]], i32* [[TMP79]], align 4 +// CHECK16-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 3 +// CHECK16-NEXT: [[TMP81:%.*]] = bitcast i8** [[TMP80]] to i32* +// CHECK16-NEXT: store i32 [[TMP60]], i32* [[TMP81]], align 4 +// CHECK16-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 3 // CHECK16-NEXT: store i8* null, i8** [[TMP82]], align 4 -// CHECK16-NEXT: [[TMP83:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 2 -// CHECK16-NEXT: [[TMP84:%.*]] = bitcast i8** [[TMP83]] to i32** -// CHECK16-NEXT: store i32* [[VLA]], i32** [[TMP84]], align 4 -// CHECK16-NEXT: [[TMP85:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 2 -// CHECK16-NEXT: [[TMP86:%.*]] = bitcast i8** [[TMP85]] to i32** -// CHECK16-NEXT: store i32* [[VLA]], i32** [[TMP86]], align 4 -// CHECK16-NEXT: [[TMP87:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 2 -// CHECK16-NEXT: store i64 [[TMP70]], i64* [[TMP87]], align 4 -// CHECK16-NEXT: [[TMP88:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 2 -// CHECK16-NEXT: store i8* null, i8** [[TMP88]], align 4 -// CHECK16-NEXT: [[TMP89:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 3 -// CHECK16-NEXT: [[TMP90:%.*]] = bitcast i8** [[TMP89]] to i32* -// CHECK16-NEXT: store i32 [[TMP68]], i32* [[TMP90]], align 4 -// CHECK16-NEXT: [[TMP91:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 3 -// CHECK16-NEXT: [[TMP92:%.*]] = bitcast i8** [[TMP91]] to i32* -// CHECK16-NEXT: store i32 [[TMP68]], i32* [[TMP92]], align 4 -// CHECK16-NEXT: [[TMP93:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 3 -// CHECK16-NEXT: store i64 4, i64* [[TMP93]], align 4 -// CHECK16-NEXT: [[TMP94:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 3 -// CHECK16-NEXT: store i8* null, i8** [[TMP94]], align 4 -// CHECK16-NEXT: [[TMP95:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 0 -// CHECK16-NEXT: [[TMP96:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 0 -// CHECK16-NEXT: [[TMP97:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 0 -// CHECK16-NEXT: [[TMP98:%.*]] = load i32, i32* [[N]], align 4 -// CHECK16-NEXT: store i32 [[TMP98]], i32* [[DOTCAPTURE_EXPR_24]], align 4 -// CHECK16-NEXT: [[TMP99:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_24]], align 4 -// CHECK16-NEXT: [[SUB26:%.*]] = sub nsw i32 [[TMP99]], 0 -// CHECK16-NEXT: [[DIV27:%.*]] = sdiv i32 [[SUB26]], 1 -// CHECK16-NEXT: [[SUB28:%.*]] = sub nsw i32 [[DIV27]], 1 -// CHECK16-NEXT: store i32 [[SUB28]], i32* [[DOTCAPTURE_EXPR_25]], align 4 -// CHECK16-NEXT: [[TMP100:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_25]], align 4 -// CHECK16-NEXT: [[ADD29:%.*]] = add nsw i32 [[TMP100]], 1 -// CHECK16-NEXT: [[TMP101:%.*]] = zext i32 [[ADD29]] to i64 -// CHECK16-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP101]]) -// CHECK16-NEXT: [[TMP102:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* 
@.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147.region_id, i32 4, i8** [[TMP95]], i8** [[TMP96]], i64* [[TMP97]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.7, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK16-NEXT: [[TMP103:%.*]] = icmp ne i32 [[TMP102]], 0 -// CHECK16-NEXT: br i1 [[TMP103]], label [[OMP_OFFLOAD_FAILED30:%.*]], label [[OMP_OFFLOAD_CONT31:%.*]] -// CHECK16: omp_offload.failed30: -// CHECK16-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147(i32 [[TMP66]], i32 [[TMP0]], i32* [[VLA]], i32 [[TMP68]]) #[[ATTR3]] -// CHECK16-NEXT: br label [[OMP_OFFLOAD_CONT31]] -// CHECK16: omp_offload.cont31: -// CHECK16-NEXT: [[TMP104:%.*]] = load i32, i32* [[N]], align 4 -// CHECK16-NEXT: store i32 [[TMP104]], i32* [[N_CASTED32]], align 4 -// CHECK16-NEXT: [[TMP105:%.*]] = load i32, i32* [[N_CASTED32]], align 4 -// CHECK16-NEXT: [[TMP106:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK16-NEXT: [[TMP107:%.*]] = sext i32 [[TMP106]] to i64 -// CHECK16-NEXT: [[TMP108:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 0 -// CHECK16-NEXT: [[TMP109:%.*]] = bitcast i8** [[TMP108]] to i32* -// CHECK16-NEXT: store i32 [[TMP105]], i32* [[TMP109]], align 4 -// CHECK16-NEXT: [[TMP110:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 0 -// CHECK16-NEXT: [[TMP111:%.*]] = bitcast i8** [[TMP110]] to i32* -// CHECK16-NEXT: store i32 [[TMP105]], i32* [[TMP111]], align 4 -// CHECK16-NEXT: [[TMP112:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES36]], i32 0, i32 0 -// CHECK16-NEXT: store i64 4, i64* [[TMP112]], align 4 -// CHECK16-NEXT: [[TMP113:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS35]], i32 0, i32 0 -// CHECK16-NEXT: store i8* null, i8** [[TMP113]], align 4 -// CHECK16-NEXT: [[TMP114:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 1 -// CHECK16-NEXT: [[TMP115:%.*]] = bitcast i8** [[TMP114]] to i32* -// CHECK16-NEXT: store i32 [[TMP0]], i32* [[TMP115]], align 4 -// CHECK16-NEXT: [[TMP116:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 1 -// CHECK16-NEXT: [[TMP117:%.*]] = bitcast i8** [[TMP116]] to i32* -// CHECK16-NEXT: store i32 [[TMP0]], i32* [[TMP117]], align 4 -// CHECK16-NEXT: [[TMP118:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES36]], i32 0, i32 1 -// CHECK16-NEXT: store i64 4, i64* [[TMP118]], align 4 -// CHECK16-NEXT: [[TMP119:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS35]], i32 0, i32 1 -// CHECK16-NEXT: store i8* null, i8** [[TMP119]], align 4 -// CHECK16-NEXT: [[TMP120:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 2 -// CHECK16-NEXT: [[TMP121:%.*]] = bitcast i8** [[TMP120]] to i32** -// CHECK16-NEXT: store i32* [[VLA]], i32** [[TMP121]], align 4 -// CHECK16-NEXT: [[TMP122:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 2 -// CHECK16-NEXT: [[TMP123:%.*]] = bitcast i8** [[TMP122]] to i32** -// CHECK16-NEXT: store i32* [[VLA]], i32** [[TMP123]], align 4 -// CHECK16-NEXT: [[TMP124:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES36]], i32 0, i32 2 -// CHECK16-NEXT: store i64 [[TMP107]], i64* [[TMP124]], align 4 -// CHECK16-NEXT: [[TMP125:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS35]], i32 0, i32 2 -// CHECK16-NEXT: store i8* null, i8** [[TMP125]], 
align 4 -// CHECK16-NEXT: [[TMP126:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 0 -// CHECK16-NEXT: [[TMP127:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 0 -// CHECK16-NEXT: [[TMP128:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES36]], i32 0, i32 0 -// CHECK16-NEXT: [[TMP129:%.*]] = load i32, i32* [[N]], align 4 -// CHECK16-NEXT: store i32 [[TMP129]], i32* [[DOTCAPTURE_EXPR_38]], align 4 -// CHECK16-NEXT: [[TMP130:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_38]], align 4 -// CHECK16-NEXT: [[SUB40:%.*]] = sub nsw i32 [[TMP130]], 0 -// CHECK16-NEXT: [[DIV41:%.*]] = sdiv i32 [[SUB40]], 1 -// CHECK16-NEXT: [[SUB42:%.*]] = sub nsw i32 [[DIV41]], 1 -// CHECK16-NEXT: store i32 [[SUB42]], i32* [[DOTCAPTURE_EXPR_39]], align 4 -// CHECK16-NEXT: [[TMP131:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_39]], align 4 -// CHECK16-NEXT: [[ADD43:%.*]] = add nsw i32 [[TMP131]], 1 -// CHECK16-NEXT: [[TMP132:%.*]] = zext i32 [[ADD43]] to i64 -// CHECK16-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP132]]) -// CHECK16-NEXT: [[TMP133:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151.region_id, i32 3, i8** [[TMP126]], i8** [[TMP127]], i64* [[TMP128]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK16-NEXT: [[TMP134:%.*]] = icmp ne i32 [[TMP133]], 0 -// CHECK16-NEXT: br i1 [[TMP134]], label [[OMP_OFFLOAD_FAILED44:%.*]], label [[OMP_OFFLOAD_CONT45:%.*]] -// CHECK16: omp_offload.failed44: -// CHECK16-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151(i32 [[TMP105]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] -// CHECK16-NEXT: br label [[OMP_OFFLOAD_CONT45]] -// CHECK16: omp_offload.cont45: -// CHECK16-NEXT: [[TMP135:%.*]] = load i32, i32* [[M]], align 4 -// CHECK16-NEXT: store i32 [[TMP135]], i32* [[DOTCAPTURE_EXPR_46]], align 4 -// CHECK16-NEXT: [[TMP136:%.*]] = load i32, i32* [[N]], align 4 -// CHECK16-NEXT: store i32 [[TMP136]], i32* [[N_CASTED47]], align 4 -// CHECK16-NEXT: [[TMP137:%.*]] = load i32, i32* [[N_CASTED47]], align 4 -// CHECK16-NEXT: [[TMP138:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_46]], align 4 -// CHECK16-NEXT: store i32 [[TMP138]], i32* [[DOTCAPTURE_EXPR__CASTED48]], align 4 -// CHECK16-NEXT: [[TMP139:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED48]], align 4 -// CHECK16-NEXT: [[TMP140:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK16-NEXT: [[TMP141:%.*]] = sext i32 [[TMP140]] to i64 -// CHECK16-NEXT: [[TMP142:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS49]], i32 0, i32 0 +// CHECK16-NEXT: [[TMP83:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 0 +// CHECK16-NEXT: [[TMP84:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 0 +// CHECK16-NEXT: [[TMP85:%.*]] = load i32, i32* [[N]], align 4 +// CHECK16-NEXT: store i32 [[TMP85]], i32* [[DOTCAPTURE_EXPR_22]], align 4 +// CHECK16-NEXT: [[TMP86:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_22]], align 4 +// CHECK16-NEXT: [[SUB24:%.*]] = sub nsw i32 [[TMP86]], 0 +// CHECK16-NEXT: [[DIV25:%.*]] = sdiv i32 [[SUB24]], 1 +// CHECK16-NEXT: [[SUB26:%.*]] = sub nsw i32 [[DIV25]], 1 +// CHECK16-NEXT: store i32 [[SUB26]], i32* [[DOTCAPTURE_EXPR_23]], align 4 +// CHECK16-NEXT: [[TMP87:%.*]] = load i32, i32* 
[[DOTCAPTURE_EXPR_23]], align 4 +// CHECK16-NEXT: [[ADD27:%.*]] = add nsw i32 [[TMP87]], 1 +// CHECK16-NEXT: [[TMP88:%.*]] = zext i32 [[ADD27]] to i64 +// CHECK16-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP88]]) +// CHECK16-NEXT: [[TMP89:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147.region_id, i32 4, i8** [[TMP83]], i8** [[TMP84]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK16-NEXT: [[TMP90:%.*]] = icmp ne i32 [[TMP89]], 0 +// CHECK16-NEXT: br i1 [[TMP90]], label [[OMP_OFFLOAD_FAILED28:%.*]], label [[OMP_OFFLOAD_CONT29:%.*]] +// CHECK16: omp_offload.failed28: +// CHECK16-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147(i32 [[TMP58]], i32 [[TMP0]], i32* [[VLA]], i32 [[TMP60]]) #[[ATTR3]] +// CHECK16-NEXT: br label [[OMP_OFFLOAD_CONT29]] +// CHECK16: omp_offload.cont29: +// CHECK16-NEXT: [[TMP91:%.*]] = load i32, i32* [[N]], align 4 +// CHECK16-NEXT: store i32 [[TMP91]], i32* [[N_CASTED30]], align 4 +// CHECK16-NEXT: [[TMP92:%.*]] = load i32, i32* [[N_CASTED30]], align 4 +// CHECK16-NEXT: [[TMP93:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK16-NEXT: [[TMP94:%.*]] = sext i32 [[TMP93]] to i64 +// CHECK16-NEXT: [[TMP95:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS31]], i32 0, i32 0 +// CHECK16-NEXT: [[TMP96:%.*]] = bitcast i8** [[TMP95]] to i32* +// CHECK16-NEXT: store i32 [[TMP92]], i32* [[TMP96]], align 4 +// CHECK16-NEXT: [[TMP97:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS32]], i32 0, i32 0 +// CHECK16-NEXT: [[TMP98:%.*]] = bitcast i8** [[TMP97]] to i32* +// CHECK16-NEXT: store i32 [[TMP92]], i32* [[TMP98]], align 4 +// CHECK16-NEXT: [[TMP99:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS33]], i32 0, i32 0 +// CHECK16-NEXT: store i8* null, i8** [[TMP99]], align 4 +// CHECK16-NEXT: [[TMP100:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS31]], i32 0, i32 1 +// CHECK16-NEXT: [[TMP101:%.*]] = bitcast i8** [[TMP100]] to i32* +// CHECK16-NEXT: store i32 [[TMP0]], i32* [[TMP101]], align 4 +// CHECK16-NEXT: [[TMP102:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS32]], i32 0, i32 1 +// CHECK16-NEXT: [[TMP103:%.*]] = bitcast i8** [[TMP102]] to i32* +// CHECK16-NEXT: store i32 [[TMP0]], i32* [[TMP103]], align 4 +// CHECK16-NEXT: [[TMP104:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS33]], i32 0, i32 1 +// CHECK16-NEXT: store i8* null, i8** [[TMP104]], align 4 +// CHECK16-NEXT: [[TMP105:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS31]], i32 0, i32 2 +// CHECK16-NEXT: [[TMP106:%.*]] = bitcast i8** [[TMP105]] to i32** +// CHECK16-NEXT: store i32* [[VLA]], i32** [[TMP106]], align 4 +// CHECK16-NEXT: [[TMP107:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS32]], i32 0, i32 2 +// CHECK16-NEXT: [[TMP108:%.*]] = bitcast i8** [[TMP107]] to i32** +// CHECK16-NEXT: store i32* [[VLA]], i32** [[TMP108]], align 4 +// CHECK16-NEXT: store i64 [[TMP94]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.12, i32 0, i32 2), align 4 +// CHECK16-NEXT: [[TMP109:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS33]], i32 0, i32 2 +// CHECK16-NEXT: 
store i8* null, i8** [[TMP109]], align 4 +// CHECK16-NEXT: [[TMP110:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS31]], i32 0, i32 0 +// CHECK16-NEXT: [[TMP111:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS32]], i32 0, i32 0 +// CHECK16-NEXT: [[TMP112:%.*]] = load i32, i32* [[N]], align 4 +// CHECK16-NEXT: store i32 [[TMP112]], i32* [[DOTCAPTURE_EXPR_35]], align 4 +// CHECK16-NEXT: [[TMP113:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_35]], align 4 +// CHECK16-NEXT: [[SUB37:%.*]] = sub nsw i32 [[TMP113]], 0 +// CHECK16-NEXT: [[DIV38:%.*]] = sdiv i32 [[SUB37]], 1 +// CHECK16-NEXT: [[SUB39:%.*]] = sub nsw i32 [[DIV38]], 1 +// CHECK16-NEXT: store i32 [[SUB39]], i32* [[DOTCAPTURE_EXPR_36]], align 4 +// CHECK16-NEXT: [[TMP114:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_36]], align 4 +// CHECK16-NEXT: [[ADD40:%.*]] = add nsw i32 [[TMP114]], 1 +// CHECK16-NEXT: [[TMP115:%.*]] = zext i32 [[ADD40]] to i64 +// CHECK16-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP115]]) +// CHECK16-NEXT: [[TMP116:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151.region_id, i32 3, i8** [[TMP110]], i8** [[TMP111]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK16-NEXT: [[TMP117:%.*]] = icmp ne i32 [[TMP116]], 0 +// CHECK16-NEXT: br i1 [[TMP117]], label [[OMP_OFFLOAD_FAILED41:%.*]], label [[OMP_OFFLOAD_CONT42:%.*]] +// CHECK16: omp_offload.failed41: +// CHECK16-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151(i32 [[TMP92]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] +// CHECK16-NEXT: br label [[OMP_OFFLOAD_CONT42]] +// CHECK16: omp_offload.cont42: +// CHECK16-NEXT: [[TMP118:%.*]] = load i32, i32* [[M]], align 4 +// CHECK16-NEXT: store i32 [[TMP118]], i32* [[DOTCAPTURE_EXPR_43]], align 4 +// CHECK16-NEXT: [[TMP119:%.*]] = load i32, i32* [[N]], align 4 +// CHECK16-NEXT: store i32 [[TMP119]], i32* [[N_CASTED44]], align 4 +// CHECK16-NEXT: [[TMP120:%.*]] = load i32, i32* [[N_CASTED44]], align 4 +// CHECK16-NEXT: [[TMP121:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_43]], align 4 +// CHECK16-NEXT: store i32 [[TMP121]], i32* [[DOTCAPTURE_EXPR__CASTED45]], align 4 +// CHECK16-NEXT: [[TMP122:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED45]], align 4 +// CHECK16-NEXT: [[TMP123:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK16-NEXT: [[TMP124:%.*]] = sext i32 [[TMP123]] to i64 +// CHECK16-NEXT: [[TMP125:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS46]], i32 0, i32 0 +// CHECK16-NEXT: [[TMP126:%.*]] = bitcast i8** [[TMP125]] to i32* +// CHECK16-NEXT: store i32 [[TMP120]], i32* [[TMP126]], align 4 +// CHECK16-NEXT: [[TMP127:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS47]], i32 0, i32 0 +// CHECK16-NEXT: [[TMP128:%.*]] = bitcast i8** [[TMP127]] to i32* +// CHECK16-NEXT: store i32 [[TMP120]], i32* [[TMP128]], align 4 +// CHECK16-NEXT: [[TMP129:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS48]], i32 0, i32 0 +// CHECK16-NEXT: store i8* null, i8** [[TMP129]], align 4 +// CHECK16-NEXT: [[TMP130:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS46]], i32 0, i32 1 +// CHECK16-NEXT: [[TMP131:%.*]] = bitcast i8** [[TMP130]] to i32* +// CHECK16-NEXT: store i32 [[TMP0]], i32* 
[[TMP131]], align 4 +// CHECK16-NEXT: [[TMP132:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS47]], i32 0, i32 1 +// CHECK16-NEXT: [[TMP133:%.*]] = bitcast i8** [[TMP132]] to i32* +// CHECK16-NEXT: store i32 [[TMP0]], i32* [[TMP133]], align 4 +// CHECK16-NEXT: [[TMP134:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS48]], i32 0, i32 1 +// CHECK16-NEXT: store i8* null, i8** [[TMP134]], align 4 +// CHECK16-NEXT: [[TMP135:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS46]], i32 0, i32 2 +// CHECK16-NEXT: [[TMP136:%.*]] = bitcast i8** [[TMP135]] to i32** +// CHECK16-NEXT: store i32* [[VLA]], i32** [[TMP136]], align 4 +// CHECK16-NEXT: [[TMP137:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS47]], i32 0, i32 2 +// CHECK16-NEXT: [[TMP138:%.*]] = bitcast i8** [[TMP137]] to i32** +// CHECK16-NEXT: store i32* [[VLA]], i32** [[TMP138]], align 4 +// CHECK16-NEXT: store i64 [[TMP124]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.16, i32 0, i32 2), align 4 +// CHECK16-NEXT: [[TMP139:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS48]], i32 0, i32 2 +// CHECK16-NEXT: store i8* null, i8** [[TMP139]], align 4 +// CHECK16-NEXT: [[TMP140:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS46]], i32 0, i32 3 +// CHECK16-NEXT: [[TMP141:%.*]] = bitcast i8** [[TMP140]] to i32* +// CHECK16-NEXT: store i32 [[TMP122]], i32* [[TMP141]], align 4 +// CHECK16-NEXT: [[TMP142:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS47]], i32 0, i32 3 // CHECK16-NEXT: [[TMP143:%.*]] = bitcast i8** [[TMP142]] to i32* -// CHECK16-NEXT: store i32 [[TMP137]], i32* [[TMP143]], align 4 -// CHECK16-NEXT: [[TMP144:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS50]], i32 0, i32 0 -// CHECK16-NEXT: [[TMP145:%.*]] = bitcast i8** [[TMP144]] to i32* -// CHECK16-NEXT: store i32 [[TMP137]], i32* [[TMP145]], align 4 -// CHECK16-NEXT: [[TMP146:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES52]], i32 0, i32 0 -// CHECK16-NEXT: store i64 4, i64* [[TMP146]], align 4 -// CHECK16-NEXT: [[TMP147:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS51]], i32 0, i32 0 -// CHECK16-NEXT: store i8* null, i8** [[TMP147]], align 4 -// CHECK16-NEXT: [[TMP148:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS49]], i32 0, i32 1 -// CHECK16-NEXT: [[TMP149:%.*]] = bitcast i8** [[TMP148]] to i32* -// CHECK16-NEXT: store i32 [[TMP0]], i32* [[TMP149]], align 4 -// CHECK16-NEXT: [[TMP150:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS50]], i32 0, i32 1 -// CHECK16-NEXT: [[TMP151:%.*]] = bitcast i8** [[TMP150]] to i32* -// CHECK16-NEXT: store i32 [[TMP0]], i32* [[TMP151]], align 4 -// CHECK16-NEXT: [[TMP152:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES52]], i32 0, i32 1 -// CHECK16-NEXT: store i64 4, i64* [[TMP152]], align 4 -// CHECK16-NEXT: [[TMP153:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS51]], i32 0, i32 1 -// CHECK16-NEXT: store i8* null, i8** [[TMP153]], align 4 -// CHECK16-NEXT: [[TMP154:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS49]], i32 0, i32 2 -// CHECK16-NEXT: [[TMP155:%.*]] = bitcast i8** [[TMP154]] to i32** -// CHECK16-NEXT: store i32* [[VLA]], i32** [[TMP155]], align 4 -// CHECK16-NEXT: [[TMP156:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* 
[[DOTOFFLOAD_PTRS50]], i32 0, i32 2 -// CHECK16-NEXT: [[TMP157:%.*]] = bitcast i8** [[TMP156]] to i32** -// CHECK16-NEXT: store i32* [[VLA]], i32** [[TMP157]], align 4 -// CHECK16-NEXT: [[TMP158:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES52]], i32 0, i32 2 -// CHECK16-NEXT: store i64 [[TMP141]], i64* [[TMP158]], align 4 -// CHECK16-NEXT: [[TMP159:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS51]], i32 0, i32 2 -// CHECK16-NEXT: store i8* null, i8** [[TMP159]], align 4 -// CHECK16-NEXT: [[TMP160:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS49]], i32 0, i32 3 -// CHECK16-NEXT: [[TMP161:%.*]] = bitcast i8** [[TMP160]] to i32* -// CHECK16-NEXT: store i32 [[TMP139]], i32* [[TMP161]], align 4 -// CHECK16-NEXT: [[TMP162:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS50]], i32 0, i32 3 -// CHECK16-NEXT: [[TMP163:%.*]] = bitcast i8** [[TMP162]] to i32* -// CHECK16-NEXT: store i32 [[TMP139]], i32* [[TMP163]], align 4 -// CHECK16-NEXT: [[TMP164:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES52]], i32 0, i32 3 -// CHECK16-NEXT: store i64 4, i64* [[TMP164]], align 4 -// CHECK16-NEXT: [[TMP165:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS51]], i32 0, i32 3 -// CHECK16-NEXT: store i8* null, i8** [[TMP165]], align 4 -// CHECK16-NEXT: [[TMP166:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS49]], i32 0, i32 0 -// CHECK16-NEXT: [[TMP167:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS50]], i32 0, i32 0 -// CHECK16-NEXT: [[TMP168:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES52]], i32 0, i32 0 -// CHECK16-NEXT: [[TMP169:%.*]] = load i32, i32* [[N]], align 4 -// CHECK16-NEXT: store i32 [[TMP169]], i32* [[DOTCAPTURE_EXPR_54]], align 4 -// CHECK16-NEXT: [[TMP170:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_54]], align 4 -// CHECK16-NEXT: [[SUB56:%.*]] = sub nsw i32 [[TMP170]], 0 -// CHECK16-NEXT: [[DIV57:%.*]] = sdiv i32 [[SUB56]], 1 -// CHECK16-NEXT: [[SUB58:%.*]] = sub nsw i32 [[DIV57]], 1 -// CHECK16-NEXT: store i32 [[SUB58]], i32* [[DOTCAPTURE_EXPR_55]], align 4 -// CHECK16-NEXT: [[TMP171:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_55]], align 4 -// CHECK16-NEXT: [[ADD59:%.*]] = add nsw i32 [[TMP171]], 1 -// CHECK16-NEXT: [[TMP172:%.*]] = zext i32 [[ADD59]] to i64 -// CHECK16-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP172]]) -// CHECK16-NEXT: [[TMP173:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155.region_id, i32 4, i8** [[TMP166]], i8** [[TMP167]], i64* [[TMP168]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK16-NEXT: [[TMP174:%.*]] = icmp ne i32 [[TMP173]], 0 -// CHECK16-NEXT: br i1 [[TMP174]], label [[OMP_OFFLOAD_FAILED60:%.*]], label [[OMP_OFFLOAD_CONT61:%.*]] -// CHECK16: omp_offload.failed60: -// CHECK16-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155(i32 [[TMP137]], i32 [[TMP0]], i32* [[VLA]], i32 [[TMP139]]) #[[ATTR3]] -// CHECK16-NEXT: br label [[OMP_OFFLOAD_CONT61]] -// CHECK16: omp_offload.cont61: -// CHECK16-NEXT: [[TMP175:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 -// CHECK16-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP175]]) +// CHECK16-NEXT: store i32 [[TMP122]], i32* [[TMP143]], align 4 
+// CHECK16-NEXT: [[TMP144:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS48]], i32 0, i32 3 +// CHECK16-NEXT: store i8* null, i8** [[TMP144]], align 4 +// CHECK16-NEXT: [[TMP145:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS46]], i32 0, i32 0 +// CHECK16-NEXT: [[TMP146:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS47]], i32 0, i32 0 +// CHECK16-NEXT: [[TMP147:%.*]] = load i32, i32* [[N]], align 4 +// CHECK16-NEXT: store i32 [[TMP147]], i32* [[DOTCAPTURE_EXPR_50]], align 4 +// CHECK16-NEXT: [[TMP148:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_50]], align 4 +// CHECK16-NEXT: [[SUB52:%.*]] = sub nsw i32 [[TMP148]], 0 +// CHECK16-NEXT: [[DIV53:%.*]] = sdiv i32 [[SUB52]], 1 +// CHECK16-NEXT: [[SUB54:%.*]] = sub nsw i32 [[DIV53]], 1 +// CHECK16-NEXT: store i32 [[SUB54]], i32* [[DOTCAPTURE_EXPR_51]], align 4 +// CHECK16-NEXT: [[TMP149:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_51]], align 4 +// CHECK16-NEXT: [[ADD55:%.*]] = add nsw i32 [[TMP149]], 1 +// CHECK16-NEXT: [[TMP150:%.*]] = zext i32 [[ADD55]] to i64 +// CHECK16-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP150]]) +// CHECK16-NEXT: [[TMP151:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155.region_id, i32 4, i8** [[TMP145]], i8** [[TMP146]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.16, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK16-NEXT: [[TMP152:%.*]] = icmp ne i32 [[TMP151]], 0 +// CHECK16-NEXT: br i1 [[TMP152]], label [[OMP_OFFLOAD_FAILED56:%.*]], label [[OMP_OFFLOAD_CONT57:%.*]] +// CHECK16: omp_offload.failed56: +// CHECK16-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155(i32 [[TMP120]], i32 [[TMP0]], i32* [[VLA]], i32 [[TMP122]]) #[[ATTR3]] +// CHECK16-NEXT: br label [[OMP_OFFLOAD_CONT57]] +// CHECK16: omp_offload.cont57: +// CHECK16-NEXT: [[TMP153:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 +// CHECK16-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP153]]) // CHECK16-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK16-NEXT: [[TMP176:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK16-NEXT: call void @llvm.stackrestore(i8* [[TMP176]]) -// CHECK16-NEXT: [[TMP177:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK16-NEXT: ret i32 [[TMP177]] +// CHECK16-NEXT: [[TMP154:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK16-NEXT: call void @llvm.stackrestore(i8* [[TMP154]]) +// CHECK16-NEXT: [[TMP155:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK16-NEXT: ret i32 [[TMP155]] // // // CHECK16-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139 @@ -15681,11 +15525,11 @@ // CHECK16-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK16-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK16-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP5]]) +// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP5]]) // CHECK16-NEXT: ret void // // -// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..5 +// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..6 // CHECK16-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK16-NEXT: entry: // CHECK16-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -15766,7 +15610,7 @@ // CHECK16-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK16-NEXT: store i32 [[TMP20]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK16-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*, i32)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32 [[TMP16]], i32 [[TMP17]], i32 [[TMP19]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP21]]) +// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*, i32)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i32 [[TMP16]], i32 [[TMP17]], i32 [[TMP19]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP21]]) // CHECK16-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK16: omp.inner.for.inc: // CHECK16-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -15808,7 +15652,7 @@ // CHECK16-NEXT: ret void // // -// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..6 +// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..7 // CHECK16-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32 noundef [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK16-NEXT: entry: // CHECK16-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -15926,11 +15770,11 @@ // CHECK16-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4 // CHECK16-NEXT: store i32 [[TMP2]], i32* [[N_CASTED]], align 4 // CHECK16-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4 -// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*)* @.omp_outlined..8 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]]) +// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]]) // CHECK16-NEXT: ret void // // -// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..8 +// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..10 // CHECK16-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] { // CHECK16-NEXT: entry: // CHECK16-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -16003,7 +15847,7 @@ // CHECK16-NEXT: [[TMP17:%.*]] = load i32, i32* [[N_ADDR]], align 4 // CHECK16-NEXT: store i32 [[TMP17]], i32* [[N_CASTED]], align 4 // CHECK16-NEXT: [[TMP18:%.*]] = load i32, i32* [[N_CASTED]], align 4 -// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), i32 [[TMP15]], i32 [[TMP16]], i32 [[TMP18]], i32 [[TMP0]], i32* [[TMP1]]) +// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP15]], i32 [[TMP16]], i32 [[TMP18]], i32 [[TMP0]], i32* [[TMP1]]) // CHECK16-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK16: omp.inner.for.inc: // CHECK16-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -16022,7 +15866,7 @@ // CHECK16-NEXT: ret void // // -// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..9 +// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..11 // CHECK16-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32 noundef [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] { // CHECK16-NEXT: entry: // CHECK16-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -16140,11 +15984,11 @@ // CHECK16-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK16-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK16-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP5]]) +// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP5]]) // CHECK16-NEXT: ret void // // -// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..14 // CHECK16-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK16-NEXT: entry: // CHECK16-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -16223,7 +16067,7 @@ // CHECK16-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK16-NEXT: store i32 [[TMP19]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK16-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*, i32)* @.omp_outlined..12 to void (i32*, i32*, ...)*), i32 [[TMP15]], i32 [[TMP16]], i32 [[TMP18]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP20]]) +// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*, i32)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i32 [[TMP15]], i32 [[TMP16]], i32 [[TMP18]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP20]]) // CHECK16-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK16: omp.inner.for.inc: // CHECK16-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -16242,7 +16086,7 @@ // CHECK16-NEXT: ret void // // -// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..12 +// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..15 // CHECK16-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32 noundef [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK16-NEXT: entry: // CHECK16-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -16385,7 +16229,7 @@ // CHECK16-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK16-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK16-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK16-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l112.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK16-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l112.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.20, 
i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.21, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK16-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK16-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK16: omp_offload.failed: @@ -16403,7 +16247,7 @@ // CHECK16-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0 // CHECK16-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0 // CHECK16-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK16-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.19, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.20, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK16-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.24, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.25, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK16-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 // CHECK16-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]] // CHECK16: omp_offload.failed5: @@ -16434,7 +16278,7 @@ // CHECK16-NEXT: [[TMP31:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0 // CHECK16-NEXT: [[TMP32:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0 // CHECK16-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK16-NEXT: [[TMP33:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l120.region_id, i32 2, i8** [[TMP31]], i8** [[TMP32]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.23, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.24, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK16-NEXT: [[TMP33:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l120.region_id, i32 2, i8** [[TMP31]], i8** [[TMP32]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.28, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.29, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK16-NEXT: [[TMP34:%.*]] = icmp ne i32 [[TMP33]], 0 // CHECK16-NEXT: br i1 [[TMP34]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]] // CHECK16: omp_offload.failed11: @@ -16452,7 +16296,7 @@ // CHECK16-NEXT: [[TMP40:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 0 // CHECK16-NEXT: [[TMP41:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 0 // CHECK16-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 
-1, i64 10) -// CHECK16-NEXT: [[TMP42:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l124.region_id, i32 1, i8** [[TMP40]], i8** [[TMP41]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.27, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.28, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK16-NEXT: [[TMP42:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l124.region_id, i32 1, i8** [[TMP40]], i8** [[TMP41]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.32, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.33, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK16-NEXT: [[TMP43:%.*]] = icmp ne i32 [[TMP42]], 0 // CHECK16-NEXT: br i1 [[TMP43]], label [[OMP_OFFLOAD_FAILED17:%.*]], label [[OMP_OFFLOAD_CONT18:%.*]] // CHECK16: omp_offload.failed17: @@ -16483,7 +16327,7 @@ // CHECK16-NEXT: [[TMP57:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0 // CHECK16-NEXT: [[TMP58:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0 // CHECK16-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK16-NEXT: [[TMP59:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l128.region_id, i32 2, i8** [[TMP57]], i8** [[TMP58]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.31, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.32, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK16-NEXT: [[TMP59:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l128.region_id, i32 2, i8** [[TMP57]], i8** [[TMP58]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.36, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.37, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK16-NEXT: [[TMP60:%.*]] = icmp ne i32 [[TMP59]], 0 // CHECK16-NEXT: br i1 [[TMP60]], label [[OMP_OFFLOAD_FAILED25:%.*]], label [[OMP_OFFLOAD_CONT26:%.*]] // CHECK16: omp_offload.failed25: @@ -16499,11 +16343,11 @@ // CHECK16-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK16-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 // CHECK16-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 -// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK16-NEXT: ret void // // -// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..14 +// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..18 // CHECK16-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK16-NEXT: entry: // CHECK16-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -16549,7 +16393,7 @@ // CHECK16: omp.inner.for.body: // CHECK16-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 // CHECK16-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 -// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]) +// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..19 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]) // CHECK16-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK16: omp.inner.for.inc: // CHECK16-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -16564,7 +16408,7 @@ // CHECK16-NEXT: ret void // // -// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..15 +// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..19 // CHECK16-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK16-NEXT: entry: // CHECK16-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -16644,11 +16488,11 @@ // CHECK16-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK16-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 // CHECK16-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 -// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..17 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..22 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK16-NEXT: ret void // // -// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..17 +// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..22 // CHECK16-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK16-NEXT: entry: // CHECK16-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -16694,7 +16538,7 @@ // CHECK16: omp.inner.for.body: // CHECK16-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 // CHECK16-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 -// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]) +// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..23 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]) // CHECK16-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK16: omp.inner.for.inc: // CHECK16-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -16709,7 +16553,7 @@ // CHECK16-NEXT: ret void // // -// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..18 +// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..23 // CHECK16-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK16-NEXT: entry: // CHECK16-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -16795,11 +16639,11 @@ // CHECK16-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK16-NEXT: store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK16-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..21 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP2]]) +// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..26 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP2]]) // CHECK16-NEXT: ret void // // -// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..21 +// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..26 // CHECK16-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK16-NEXT: entry: // CHECK16-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -16851,7 +16695,7 @@ // CHECK16-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK16-NEXT: store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK16-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..22 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]]) +// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..27 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]]) // CHECK16-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK16: omp.inner.for.inc: // CHECK16-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -16866,7 +16710,7 @@ // CHECK16-NEXT: ret void // // -// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..22 +// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..27 // CHECK16-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK16-NEXT: entry: // CHECK16-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -16968,11 +16812,11 @@ // CHECK16-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK16-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 // CHECK16-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 -// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..25 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..30 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK16-NEXT: ret void // // -// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..25 +// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..30 // CHECK16-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK16-NEXT: entry: // CHECK16-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -17018,7 +16862,7 @@ // CHECK16: omp.inner.for.body: // CHECK16-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 // CHECK16-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 -// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..26 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]) +// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..31 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]) // CHECK16-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK16: omp.inner.for.inc: // CHECK16-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -17033,7 +16877,7 @@ // CHECK16-NEXT: ret void // // -// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..26 +// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..31 // CHECK16-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK16-NEXT: entry: // CHECK16-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -17117,11 +16961,11 @@ // CHECK16-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK16-NEXT: store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK16-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..29 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP2]]) +// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..34 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP2]]) // CHECK16-NEXT: ret void // // -// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..29 +// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..34 // CHECK16-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK16-NEXT: entry: // CHECK16-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -17173,7 +17017,7 @@ // CHECK16-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK16-NEXT: store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK16-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..30 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]]) +// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..35 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]]) // CHECK16-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK16: omp.inner.for.inc: // CHECK16-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -17188,7 +17032,7 @@ // CHECK16-NEXT: ret void // // -// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..30 +// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..35 // CHECK16-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK16-NEXT: entry: // CHECK16-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -17284,7 +17128,6 @@ // CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK17-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK17-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8 -// CHECK17-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 8 // CHECK17-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK17-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK17-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -17292,38 +17135,34 @@ // CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS5:%.*]] = alloca [3 x i8*], align 8 // CHECK17-NEXT: [[DOTOFFLOAD_PTRS6:%.*]] = alloca [3 x i8*], align 8 // CHECK17-NEXT: [[DOTOFFLOAD_MAPPERS7:%.*]] = alloca [3 x i8*], align 8 -// CHECK17-NEXT: [[DOTOFFLOAD_SIZES8:%.*]] = alloca [3 x i64], align 8 -// CHECK17-NEXT: [[_TMP9:%.*]] = alloca i32, align 4 +// CHECK17-NEXT: [[_TMP8:%.*]] = alloca i32, align 4 +// CHECK17-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4 // CHECK17-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4 -// CHECK17-NEXT: [[DOTCAPTURE_EXPR_11:%.*]] = alloca i32, align 4 -// CHECK17-NEXT: [[DOTCAPTURE_EXPR_18:%.*]] = alloca i32, align 4 -// CHECK17-NEXT: [[N_CASTED19:%.*]] = alloca i64, align 8 +// 
CHECK17-NEXT: [[DOTCAPTURE_EXPR_17:%.*]] = alloca i32, align 4 +// CHECK17-NEXT: [[N_CASTED18:%.*]] = alloca i64, align 8 // CHECK17-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8 -// CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS22:%.*]] = alloca [4 x i8*], align 8 -// CHECK17-NEXT: [[DOTOFFLOAD_PTRS23:%.*]] = alloca [4 x i8*], align 8 -// CHECK17-NEXT: [[DOTOFFLOAD_MAPPERS24:%.*]] = alloca [4 x i8*], align 8 -// CHECK17-NEXT: [[DOTOFFLOAD_SIZES25:%.*]] = alloca [4 x i64], align 8 -// CHECK17-NEXT: [[_TMP26:%.*]] = alloca i32, align 4 -// CHECK17-NEXT: [[DOTCAPTURE_EXPR_27:%.*]] = alloca i32, align 4 -// CHECK17-NEXT: [[DOTCAPTURE_EXPR_28:%.*]] = alloca i32, align 4 -// CHECK17-NEXT: [[N_CASTED35:%.*]] = alloca i64, align 8 -// CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS37:%.*]] = alloca [3 x i8*], align 8 -// CHECK17-NEXT: [[DOTOFFLOAD_PTRS38:%.*]] = alloca [3 x i8*], align 8 -// CHECK17-NEXT: [[DOTOFFLOAD_MAPPERS39:%.*]] = alloca [3 x i8*], align 8 -// CHECK17-NEXT: [[DOTOFFLOAD_SIZES40:%.*]] = alloca [3 x i64], align 8 -// CHECK17-NEXT: [[_TMP41:%.*]] = alloca i32, align 4 -// CHECK17-NEXT: [[DOTCAPTURE_EXPR_42:%.*]] = alloca i32, align 4 -// CHECK17-NEXT: [[DOTCAPTURE_EXPR_43:%.*]] = alloca i32, align 4 -// CHECK17-NEXT: [[DOTCAPTURE_EXPR_50:%.*]] = alloca i32, align 4 -// CHECK17-NEXT: [[N_CASTED51:%.*]] = alloca i64, align 8 -// CHECK17-NEXT: [[DOTCAPTURE_EXPR__CASTED53:%.*]] = alloca i64, align 8 -// CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS55:%.*]] = alloca [4 x i8*], align 8 -// CHECK17-NEXT: [[DOTOFFLOAD_PTRS56:%.*]] = alloca [4 x i8*], align 8 -// CHECK17-NEXT: [[DOTOFFLOAD_MAPPERS57:%.*]] = alloca [4 x i8*], align 8 -// CHECK17-NEXT: [[DOTOFFLOAD_SIZES58:%.*]] = alloca [4 x i64], align 8 -// CHECK17-NEXT: [[_TMP59:%.*]] = alloca i32, align 4 -// CHECK17-NEXT: [[DOTCAPTURE_EXPR_60:%.*]] = alloca i32, align 4 -// CHECK17-NEXT: [[DOTCAPTURE_EXPR_61:%.*]] = alloca i32, align 4 +// CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS21:%.*]] = alloca [4 x i8*], align 8 +// CHECK17-NEXT: [[DOTOFFLOAD_PTRS22:%.*]] = alloca [4 x i8*], align 8 +// CHECK17-NEXT: [[DOTOFFLOAD_MAPPERS23:%.*]] = alloca [4 x i8*], align 8 +// CHECK17-NEXT: [[_TMP24:%.*]] = alloca i32, align 4 +// CHECK17-NEXT: [[DOTCAPTURE_EXPR_25:%.*]] = alloca i32, align 4 +// CHECK17-NEXT: [[DOTCAPTURE_EXPR_26:%.*]] = alloca i32, align 4 +// CHECK17-NEXT: [[N_CASTED33:%.*]] = alloca i64, align 8 +// CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS35:%.*]] = alloca [3 x i8*], align 8 +// CHECK17-NEXT: [[DOTOFFLOAD_PTRS36:%.*]] = alloca [3 x i8*], align 8 +// CHECK17-NEXT: [[DOTOFFLOAD_MAPPERS37:%.*]] = alloca [3 x i8*], align 8 +// CHECK17-NEXT: [[_TMP38:%.*]] = alloca i32, align 4 +// CHECK17-NEXT: [[DOTCAPTURE_EXPR_39:%.*]] = alloca i32, align 4 +// CHECK17-NEXT: [[DOTCAPTURE_EXPR_40:%.*]] = alloca i32, align 4 +// CHECK17-NEXT: [[DOTCAPTURE_EXPR_47:%.*]] = alloca i32, align 4 +// CHECK17-NEXT: [[N_CASTED48:%.*]] = alloca i64, align 8 +// CHECK17-NEXT: [[DOTCAPTURE_EXPR__CASTED50:%.*]] = alloca i64, align 8 +// CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS52:%.*]] = alloca [4 x i8*], align 8 +// CHECK17-NEXT: [[DOTOFFLOAD_PTRS53:%.*]] = alloca [4 x i8*], align 8 +// CHECK17-NEXT: [[DOTOFFLOAD_MAPPERS54:%.*]] = alloca [4 x i8*], align 8 +// CHECK17-NEXT: [[_TMP55:%.*]] = alloca i32, align 4 +// CHECK17-NEXT: [[DOTCAPTURE_EXPR_56:%.*]] = alloca i32, align 4 +// CHECK17-NEXT: [[DOTCAPTURE_EXPR_57:%.*]] = alloca i32, align 4 // CHECK17-NEXT: store i32 0, i32* [[RETVAL]], align 4 // CHECK17-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 // CHECK17-NEXT: store i8** 
[[ARGV]], i8*** [[ARGV_ADDR]], align 8 @@ -17346,314 +17185,280 @@ // CHECK17-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK17-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i64* // CHECK17-NEXT: store i64 [[TMP4]], i64* [[TMP9]], align 8 -// CHECK17-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK17-NEXT: store i64 4, i64* [[TMP10]], align 8 -// CHECK17-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK17-NEXT: store i8* null, i8** [[TMP11]], align 8 -// CHECK17-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK17-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64* -// CHECK17-NEXT: store i64 [[TMP1]], i64* [[TMP13]], align 8 -// CHECK17-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK17-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64* -// CHECK17-NEXT: store i64 [[TMP1]], i64* [[TMP15]], align 8 -// CHECK17-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK17-NEXT: store i64 8, i64* [[TMP16]], align 8 -// CHECK17-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK17-NEXT: store i8* null, i8** [[TMP17]], align 8 -// CHECK17-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK17-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK17-NEXT: store i8* null, i8** [[TMP10]], align 8 +// CHECK17-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK17-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i64* +// CHECK17-NEXT: store i64 [[TMP1]], i64* [[TMP12]], align 8 +// CHECK17-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK17-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i64* +// CHECK17-NEXT: store i64 [[TMP1]], i64* [[TMP14]], align 8 +// CHECK17-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK17-NEXT: store i8* null, i8** [[TMP15]], align 8 +// CHECK17-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK17-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** +// CHECK17-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 8 +// CHECK17-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK17-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** // CHECK17-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 8 -// CHECK17-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK17-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** -// CHECK17-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 8 -// CHECK17-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK17-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 8 -// CHECK17-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK17-NEXT: store i8* null, i8** [[TMP23]], align 8 -// CHECK17-NEXT: [[TMP24:%.*]] = getelementptr inbounds 
[3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4 -// CHECK17-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK17-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK17-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 +// CHECK17-NEXT: store i64 [[TMP5]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 2), align 8 +// CHECK17-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK17-NEXT: store i8* null, i8** [[TMP20]], align 8 +// CHECK17-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP23:%.*]] = load i32, i32* [[N]], align 4 +// CHECK17-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK17-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK17-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK17-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK17-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK17-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK17-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1 -// CHECK17-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 -// CHECK17-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP30]]) -// CHECK17-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK17-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 -// CHECK17-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK17-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], 1 +// CHECK17-NEXT: [[TMP26:%.*]] = zext i32 [[ADD]] to i64 +// CHECK17-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP26]]) +// CHECK17-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK17-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK17-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK17: omp_offload.failed: // CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139(i64 [[TMP4]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK17: omp_offload.cont: -// CHECK17-NEXT: [[TMP33:%.*]] 
= load i32, i32* [[N]], align 4 +// CHECK17-NEXT: [[TMP29:%.*]] = load i32, i32* [[N]], align 4 // CHECK17-NEXT: [[CONV4:%.*]] = bitcast i64* [[N_CASTED3]] to i32* -// CHECK17-NEXT: store i32 [[TMP33]], i32* [[CONV4]], align 4 -// CHECK17-NEXT: [[TMP34:%.*]] = load i64, i64* [[N_CASTED3]], align 8 -// CHECK17-NEXT: [[TMP35:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK17-NEXT: [[TMP36:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i64* -// CHECK17-NEXT: store i64 [[TMP34]], i64* [[TMP37]], align 8 -// CHECK17-NEXT: [[TMP38:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i64* -// CHECK17-NEXT: store i64 [[TMP34]], i64* [[TMP39]], align 8 -// CHECK17-NEXT: [[TMP40:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 0 -// CHECK17-NEXT: store i64 4, i64* [[TMP40]], align 8 -// CHECK17-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 0 +// CHECK17-NEXT: store i32 [[TMP29]], i32* [[CONV4]], align 4 +// CHECK17-NEXT: [[TMP30:%.*]] = load i64, i64* [[N_CASTED3]], align 8 +// CHECK17-NEXT: [[TMP31:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK17-NEXT: [[TMP32:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i64* +// CHECK17-NEXT: store i64 [[TMP30]], i64* [[TMP33]], align 8 +// CHECK17-NEXT: [[TMP34:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i64* +// CHECK17-NEXT: store i64 [[TMP30]], i64* [[TMP35]], align 8 +// CHECK17-NEXT: [[TMP36:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 0 +// CHECK17-NEXT: store i8* null, i8** [[TMP36]], align 8 +// CHECK17-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1 +// CHECK17-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i64* +// CHECK17-NEXT: store i64 [[TMP1]], i64* [[TMP38]], align 8 +// CHECK17-NEXT: [[TMP39:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 1 +// CHECK17-NEXT: [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i64* +// CHECK17-NEXT: store i64 [[TMP1]], i64* [[TMP40]], align 8 +// CHECK17-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 1 // CHECK17-NEXT: store i8* null, i8** [[TMP41]], align 8 -// CHECK17-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1 -// CHECK17-NEXT: [[TMP43:%.*]] = bitcast i8** [[TMP42]] to i64* -// CHECK17-NEXT: store i64 [[TMP1]], i64* [[TMP43]], align 8 -// CHECK17-NEXT: [[TMP44:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 1 -// CHECK17-NEXT: [[TMP45:%.*]] = bitcast i8** [[TMP44]] to i64* -// CHECK17-NEXT: store i64 [[TMP1]], i64* [[TMP45]], align 8 -// CHECK17-NEXT: [[TMP46:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 1 -// CHECK17-NEXT: store i64 8, i64* [[TMP46]], align 8 -// CHECK17-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 1 -// CHECK17-NEXT: store i8* null, i8** [[TMP47]], align 8 -// CHECK17-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x 
i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 2 -// CHECK17-NEXT: [[TMP49:%.*]] = bitcast i8** [[TMP48]] to i32** -// CHECK17-NEXT: store i32* [[VLA]], i32** [[TMP49]], align 8 -// CHECK17-NEXT: [[TMP50:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 2 -// CHECK17-NEXT: [[TMP51:%.*]] = bitcast i8** [[TMP50]] to i32** -// CHECK17-NEXT: store i32* [[VLA]], i32** [[TMP51]], align 8 -// CHECK17-NEXT: [[TMP52:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 2 -// CHECK17-NEXT: store i64 [[TMP35]], i64* [[TMP52]], align 8 -// CHECK17-NEXT: [[TMP53:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 2 -// CHECK17-NEXT: store i8* null, i8** [[TMP53]], align 8 -// CHECK17-NEXT: [[TMP54:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP55:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP56:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP57:%.*]] = load i32, i32* [[N]], align 4 -// CHECK17-NEXT: store i32 [[TMP57]], i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK17-NEXT: [[TMP58:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK17-NEXT: [[SUB12:%.*]] = sub nsw i32 [[TMP58]], 0 -// CHECK17-NEXT: [[DIV13:%.*]] = sdiv i32 [[SUB12]], 1 -// CHECK17-NEXT: [[SUB14:%.*]] = sub nsw i32 [[DIV13]], 1 -// CHECK17-NEXT: store i32 [[SUB14]], i32* [[DOTCAPTURE_EXPR_11]], align 4 -// CHECK17-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_11]], align 4 -// CHECK17-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP59]], 1 -// CHECK17-NEXT: [[TMP60:%.*]] = zext i32 [[ADD15]] to i64 -// CHECK17-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP60]]) -// CHECK17-NEXT: [[TMP61:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143.region_id, i32 3, i8** [[TMP54]], i8** [[TMP55]], i64* [[TMP56]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK17-NEXT: [[TMP62:%.*]] = icmp ne i32 [[TMP61]], 0 -// CHECK17-NEXT: br i1 [[TMP62]], label [[OMP_OFFLOAD_FAILED16:%.*]], label [[OMP_OFFLOAD_CONT17:%.*]] -// CHECK17: omp_offload.failed16: -// CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143(i64 [[TMP34]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] -// CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT17]] -// CHECK17: omp_offload.cont17: -// CHECK17-NEXT: [[TMP63:%.*]] = load i32, i32* [[M]], align 4 -// CHECK17-NEXT: store i32 [[TMP63]], i32* [[DOTCAPTURE_EXPR_18]], align 4 -// CHECK17-NEXT: [[TMP64:%.*]] = load i32, i32* [[N]], align 4 -// CHECK17-NEXT: [[CONV20:%.*]] = bitcast i64* [[N_CASTED19]] to i32* -// CHECK17-NEXT: store i32 [[TMP64]], i32* [[CONV20]], align 4 -// CHECK17-NEXT: [[TMP65:%.*]] = load i64, i64* [[N_CASTED19]], align 8 -// CHECK17-NEXT: [[TMP66:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_18]], align 4 -// CHECK17-NEXT: [[CONV21:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* -// CHECK17-NEXT: store i32 [[TMP66]], i32* [[CONV21]], align 4 -// CHECK17-NEXT: [[TMP67:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK17-NEXT: [[TMP68:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK17-NEXT: [[TMP69:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* 
[[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP70:%.*]] = bitcast i8** [[TMP69]] to i64* -// CHECK17-NEXT: store i64 [[TMP65]], i64* [[TMP70]], align 8 -// CHECK17-NEXT: [[TMP71:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP72:%.*]] = bitcast i8** [[TMP71]] to i64* -// CHECK17-NEXT: store i64 [[TMP65]], i64* [[TMP72]], align 8 -// CHECK17-NEXT: [[TMP73:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 0 -// CHECK17-NEXT: store i64 4, i64* [[TMP73]], align 8 -// CHECK17-NEXT: [[TMP74:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 0 -// CHECK17-NEXT: store i8* null, i8** [[TMP74]], align 8 -// CHECK17-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 1 -// CHECK17-NEXT: [[TMP76:%.*]] = bitcast i8** [[TMP75]] to i64* -// CHECK17-NEXT: store i64 [[TMP1]], i64* [[TMP76]], align 8 -// CHECK17-NEXT: [[TMP77:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 1 -// CHECK17-NEXT: [[TMP78:%.*]] = bitcast i8** [[TMP77]] to i64* -// CHECK17-NEXT: store i64 [[TMP1]], i64* [[TMP78]], align 8 -// CHECK17-NEXT: [[TMP79:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 1 -// CHECK17-NEXT: store i64 8, i64* [[TMP79]], align 8 -// CHECK17-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 1 +// CHECK17-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 2 +// CHECK17-NEXT: [[TMP43:%.*]] = bitcast i8** [[TMP42]] to i32** +// CHECK17-NEXT: store i32* [[VLA]], i32** [[TMP43]], align 8 +// CHECK17-NEXT: [[TMP44:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 2 +// CHECK17-NEXT: [[TMP45:%.*]] = bitcast i8** [[TMP44]] to i32** +// CHECK17-NEXT: store i32* [[VLA]], i32** [[TMP45]], align 8 +// CHECK17-NEXT: store i64 [[TMP31]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 2), align 8 +// CHECK17-NEXT: [[TMP46:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 2 +// CHECK17-NEXT: store i8* null, i8** [[TMP46]], align 8 +// CHECK17-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP49:%.*]] = load i32, i32* [[N]], align 4 +// CHECK17-NEXT: store i32 [[TMP49]], i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK17-NEXT: [[TMP50:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK17-NEXT: [[SUB11:%.*]] = sub nsw i32 [[TMP50]], 0 +// CHECK17-NEXT: [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1 +// CHECK17-NEXT: [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1 +// CHECK17-NEXT: store i32 [[SUB13]], i32* [[DOTCAPTURE_EXPR_10]], align 4 +// CHECK17-NEXT: [[TMP51:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 +// CHECK17-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP51]], 1 +// CHECK17-NEXT: [[TMP52:%.*]] = zext i32 [[ADD14]] to i64 +// CHECK17-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP52]]) +// CHECK17-NEXT: [[TMP53:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143.region_id, i32 3, i8** [[TMP47]], i8** 
[[TMP48]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK17-NEXT: [[TMP54:%.*]] = icmp ne i32 [[TMP53]], 0 +// CHECK17-NEXT: br i1 [[TMP54]], label [[OMP_OFFLOAD_FAILED15:%.*]], label [[OMP_OFFLOAD_CONT16:%.*]] +// CHECK17: omp_offload.failed15: +// CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143(i64 [[TMP30]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] +// CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT16]] +// CHECK17: omp_offload.cont16: +// CHECK17-NEXT: [[TMP55:%.*]] = load i32, i32* [[M]], align 4 +// CHECK17-NEXT: store i32 [[TMP55]], i32* [[DOTCAPTURE_EXPR_17]], align 4 +// CHECK17-NEXT: [[TMP56:%.*]] = load i32, i32* [[N]], align 4 +// CHECK17-NEXT: [[CONV19:%.*]] = bitcast i64* [[N_CASTED18]] to i32* +// CHECK17-NEXT: store i32 [[TMP56]], i32* [[CONV19]], align 4 +// CHECK17-NEXT: [[TMP57:%.*]] = load i64, i64* [[N_CASTED18]], align 8 +// CHECK17-NEXT: [[TMP58:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_17]], align 4 +// CHECK17-NEXT: [[CONV20:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* +// CHECK17-NEXT: store i32 [[TMP58]], i32* [[CONV20]], align 4 +// CHECK17-NEXT: [[TMP59:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 +// CHECK17-NEXT: [[TMP60:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK17-NEXT: [[TMP61:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP62:%.*]] = bitcast i8** [[TMP61]] to i64* +// CHECK17-NEXT: store i64 [[TMP57]], i64* [[TMP62]], align 8 +// CHECK17-NEXT: [[TMP63:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP64:%.*]] = bitcast i8** [[TMP63]] to i64* +// CHECK17-NEXT: store i64 [[TMP57]], i64* [[TMP64]], align 8 +// CHECK17-NEXT: [[TMP65:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 0 +// CHECK17-NEXT: store i8* null, i8** [[TMP65]], align 8 +// CHECK17-NEXT: [[TMP66:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 1 +// CHECK17-NEXT: [[TMP67:%.*]] = bitcast i8** [[TMP66]] to i64* +// CHECK17-NEXT: store i64 [[TMP1]], i64* [[TMP67]], align 8 +// CHECK17-NEXT: [[TMP68:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 1 +// CHECK17-NEXT: [[TMP69:%.*]] = bitcast i8** [[TMP68]] to i64* +// CHECK17-NEXT: store i64 [[TMP1]], i64* [[TMP69]], align 8 +// CHECK17-NEXT: [[TMP70:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 1 +// CHECK17-NEXT: store i8* null, i8** [[TMP70]], align 8 +// CHECK17-NEXT: [[TMP71:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 2 +// CHECK17-NEXT: [[TMP72:%.*]] = bitcast i8** [[TMP71]] to i32** +// CHECK17-NEXT: store i32* [[VLA]], i32** [[TMP72]], align 8 +// CHECK17-NEXT: [[TMP73:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 2 +// CHECK17-NEXT: [[TMP74:%.*]] = bitcast i8** [[TMP73]] to i32** +// CHECK17-NEXT: store i32* [[VLA]], i32** [[TMP74]], align 8 +// CHECK17-NEXT: store i64 [[TMP60]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 2), align 8 +// CHECK17-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 2 +// CHECK17-NEXT: store i8* null, i8** [[TMP75]], align 8 +// 
CHECK17-NEXT: [[TMP76:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 3 +// CHECK17-NEXT: [[TMP77:%.*]] = bitcast i8** [[TMP76]] to i64* +// CHECK17-NEXT: store i64 [[TMP59]], i64* [[TMP77]], align 8 +// CHECK17-NEXT: [[TMP78:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 3 +// CHECK17-NEXT: [[TMP79:%.*]] = bitcast i8** [[TMP78]] to i64* +// CHECK17-NEXT: store i64 [[TMP59]], i64* [[TMP79]], align 8 +// CHECK17-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 3 // CHECK17-NEXT: store i8* null, i8** [[TMP80]], align 8 -// CHECK17-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 2 -// CHECK17-NEXT: [[TMP82:%.*]] = bitcast i8** [[TMP81]] to i32** -// CHECK17-NEXT: store i32* [[VLA]], i32** [[TMP82]], align 8 -// CHECK17-NEXT: [[TMP83:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 2 -// CHECK17-NEXT: [[TMP84:%.*]] = bitcast i8** [[TMP83]] to i32** -// CHECK17-NEXT: store i32* [[VLA]], i32** [[TMP84]], align 8 -// CHECK17-NEXT: [[TMP85:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 2 -// CHECK17-NEXT: store i64 [[TMP68]], i64* [[TMP85]], align 8 -// CHECK17-NEXT: [[TMP86:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 2 -// CHECK17-NEXT: store i8* null, i8** [[TMP86]], align 8 -// CHECK17-NEXT: [[TMP87:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 3 -// CHECK17-NEXT: [[TMP88:%.*]] = bitcast i8** [[TMP87]] to i64* -// CHECK17-NEXT: store i64 [[TMP67]], i64* [[TMP88]], align 8 -// CHECK17-NEXT: [[TMP89:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 3 -// CHECK17-NEXT: [[TMP90:%.*]] = bitcast i8** [[TMP89]] to i64* -// CHECK17-NEXT: store i64 [[TMP67]], i64* [[TMP90]], align 8 -// CHECK17-NEXT: [[TMP91:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 3 -// CHECK17-NEXT: store i64 4, i64* [[TMP91]], align 8 -// CHECK17-NEXT: [[TMP92:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 3 -// CHECK17-NEXT: store i8* null, i8** [[TMP92]], align 8 -// CHECK17-NEXT: [[TMP93:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP94:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP95:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP96:%.*]] = load i32, i32* [[N]], align 4 -// CHECK17-NEXT: store i32 [[TMP96]], i32* [[DOTCAPTURE_EXPR_27]], align 4 -// CHECK17-NEXT: [[TMP97:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_27]], align 4 -// CHECK17-NEXT: [[SUB29:%.*]] = sub nsw i32 [[TMP97]], 0 -// CHECK17-NEXT: [[DIV30:%.*]] = sdiv i32 [[SUB29]], 1 -// CHECK17-NEXT: [[SUB31:%.*]] = sub nsw i32 [[DIV30]], 1 -// CHECK17-NEXT: store i32 [[SUB31]], i32* [[DOTCAPTURE_EXPR_28]], align 4 -// CHECK17-NEXT: [[TMP98:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_28]], align 4 -// CHECK17-NEXT: [[ADD32:%.*]] = add nsw i32 [[TMP98]], 1 -// CHECK17-NEXT: [[TMP99:%.*]] = zext i32 [[ADD32]] to i64 -// CHECK17-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP99]]) -// CHECK17-NEXT: [[TMP100:%.*]] = call i32 
@__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147.region_id, i32 4, i8** [[TMP93]], i8** [[TMP94]], i64* [[TMP95]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.7, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK17-NEXT: [[TMP101:%.*]] = icmp ne i32 [[TMP100]], 0 -// CHECK17-NEXT: br i1 [[TMP101]], label [[OMP_OFFLOAD_FAILED33:%.*]], label [[OMP_OFFLOAD_CONT34:%.*]] -// CHECK17: omp_offload.failed33: -// CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147(i64 [[TMP65]], i64 [[TMP1]], i32* [[VLA]], i64 [[TMP67]]) #[[ATTR3]] -// CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT34]] -// CHECK17: omp_offload.cont34: -// CHECK17-NEXT: [[TMP102:%.*]] = load i32, i32* [[N]], align 4 -// CHECK17-NEXT: [[CONV36:%.*]] = bitcast i64* [[N_CASTED35]] to i32* -// CHECK17-NEXT: store i32 [[TMP102]], i32* [[CONV36]], align 4 -// CHECK17-NEXT: [[TMP103:%.*]] = load i64, i64* [[N_CASTED35]], align 8 -// CHECK17-NEXT: [[TMP104:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK17-NEXT: [[TMP105:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS37]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP106:%.*]] = bitcast i8** [[TMP105]] to i64* -// CHECK17-NEXT: store i64 [[TMP103]], i64* [[TMP106]], align 8 -// CHECK17-NEXT: [[TMP107:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS38]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP108:%.*]] = bitcast i8** [[TMP107]] to i64* -// CHECK17-NEXT: store i64 [[TMP103]], i64* [[TMP108]], align 8 -// CHECK17-NEXT: [[TMP109:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES40]], i32 0, i32 0 -// CHECK17-NEXT: store i64 4, i64* [[TMP109]], align 8 -// CHECK17-NEXT: [[TMP110:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS39]], i64 0, i64 0 -// CHECK17-NEXT: store i8* null, i8** [[TMP110]], align 8 -// CHECK17-NEXT: [[TMP111:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS37]], i32 0, i32 1 -// CHECK17-NEXT: [[TMP112:%.*]] = bitcast i8** [[TMP111]] to i64* -// CHECK17-NEXT: store i64 [[TMP1]], i64* [[TMP112]], align 8 -// CHECK17-NEXT: [[TMP113:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS38]], i32 0, i32 1 -// CHECK17-NEXT: [[TMP114:%.*]] = bitcast i8** [[TMP113]] to i64* -// CHECK17-NEXT: store i64 [[TMP1]], i64* [[TMP114]], align 8 -// CHECK17-NEXT: [[TMP115:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES40]], i32 0, i32 1 -// CHECK17-NEXT: store i64 8, i64* [[TMP115]], align 8 -// CHECK17-NEXT: [[TMP116:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS39]], i64 0, i64 1 -// CHECK17-NEXT: store i8* null, i8** [[TMP116]], align 8 -// CHECK17-NEXT: [[TMP117:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS37]], i32 0, i32 2 -// CHECK17-NEXT: [[TMP118:%.*]] = bitcast i8** [[TMP117]] to i32** -// CHECK17-NEXT: store i32* [[VLA]], i32** [[TMP118]], align 8 -// CHECK17-NEXT: [[TMP119:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS38]], i32 0, i32 2 -// CHECK17-NEXT: [[TMP120:%.*]] = bitcast i8** [[TMP119]] to i32** -// CHECK17-NEXT: store i32* [[VLA]], i32** [[TMP120]], align 8 -// CHECK17-NEXT: [[TMP121:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES40]], i32 0, i32 2 -// CHECK17-NEXT: store i64 [[TMP104]], i64* [[TMP121]], align 8 -// CHECK17-NEXT: [[TMP122:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* 
[[DOTOFFLOAD_MAPPERS39]], i64 0, i64 2 -// CHECK17-NEXT: store i8* null, i8** [[TMP122]], align 8 -// CHECK17-NEXT: [[TMP123:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS37]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP124:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS38]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP125:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES40]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP126:%.*]] = load i32, i32* [[N]], align 4 -// CHECK17-NEXT: store i32 [[TMP126]], i32* [[DOTCAPTURE_EXPR_42]], align 4 -// CHECK17-NEXT: [[TMP127:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_42]], align 4 -// CHECK17-NEXT: [[SUB44:%.*]] = sub nsw i32 [[TMP127]], 0 -// CHECK17-NEXT: [[DIV45:%.*]] = sdiv i32 [[SUB44]], 1 -// CHECK17-NEXT: [[SUB46:%.*]] = sub nsw i32 [[DIV45]], 1 -// CHECK17-NEXT: store i32 [[SUB46]], i32* [[DOTCAPTURE_EXPR_43]], align 4 -// CHECK17-NEXT: [[TMP128:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_43]], align 4 -// CHECK17-NEXT: [[ADD47:%.*]] = add nsw i32 [[TMP128]], 1 -// CHECK17-NEXT: [[TMP129:%.*]] = zext i32 [[ADD47]] to i64 -// CHECK17-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP129]]) -// CHECK17-NEXT: [[TMP130:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151.region_id, i32 3, i8** [[TMP123]], i8** [[TMP124]], i64* [[TMP125]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK17-NEXT: [[TMP131:%.*]] = icmp ne i32 [[TMP130]], 0 -// CHECK17-NEXT: br i1 [[TMP131]], label [[OMP_OFFLOAD_FAILED48:%.*]], label [[OMP_OFFLOAD_CONT49:%.*]] -// CHECK17: omp_offload.failed48: -// CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151(i64 [[TMP103]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] -// CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT49]] -// CHECK17: omp_offload.cont49: -// CHECK17-NEXT: [[TMP132:%.*]] = load i32, i32* [[M]], align 4 -// CHECK17-NEXT: store i32 [[TMP132]], i32* [[DOTCAPTURE_EXPR_50]], align 4 -// CHECK17-NEXT: [[TMP133:%.*]] = load i32, i32* [[N]], align 4 -// CHECK17-NEXT: [[CONV52:%.*]] = bitcast i64* [[N_CASTED51]] to i32* -// CHECK17-NEXT: store i32 [[TMP133]], i32* [[CONV52]], align 4 -// CHECK17-NEXT: [[TMP134:%.*]] = load i64, i64* [[N_CASTED51]], align 8 -// CHECK17-NEXT: [[TMP135:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_50]], align 4 -// CHECK17-NEXT: [[CONV54:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED53]] to i32* -// CHECK17-NEXT: store i32 [[TMP135]], i32* [[CONV54]], align 4 -// CHECK17-NEXT: [[TMP136:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED53]], align 8 -// CHECK17-NEXT: [[TMP137:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK17-NEXT: [[TMP138:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS55]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP83:%.*]] = load i32, i32* [[N]], align 4 +// CHECK17-NEXT: store i32 [[TMP83]], i32* [[DOTCAPTURE_EXPR_25]], align 4 +// CHECK17-NEXT: [[TMP84:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_25]], align 4 +// CHECK17-NEXT: [[SUB27:%.*]] = sub nsw i32 [[TMP84]], 0 +// CHECK17-NEXT: [[DIV28:%.*]] = sdiv i32 [[SUB27]], 1 +// CHECK17-NEXT: [[SUB29:%.*]] = sub nsw 
i32 [[DIV28]], 1 +// CHECK17-NEXT: store i32 [[SUB29]], i32* [[DOTCAPTURE_EXPR_26]], align 4 +// CHECK17-NEXT: [[TMP85:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_26]], align 4 +// CHECK17-NEXT: [[ADD30:%.*]] = add nsw i32 [[TMP85]], 1 +// CHECK17-NEXT: [[TMP86:%.*]] = zext i32 [[ADD30]] to i64 +// CHECK17-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP86]]) +// CHECK17-NEXT: [[TMP87:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147.region_id, i32 4, i8** [[TMP81]], i8** [[TMP82]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK17-NEXT: [[TMP88:%.*]] = icmp ne i32 [[TMP87]], 0 +// CHECK17-NEXT: br i1 [[TMP88]], label [[OMP_OFFLOAD_FAILED31:%.*]], label [[OMP_OFFLOAD_CONT32:%.*]] +// CHECK17: omp_offload.failed31: +// CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147(i64 [[TMP57]], i64 [[TMP1]], i32* [[VLA]], i64 [[TMP59]]) #[[ATTR3]] +// CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT32]] +// CHECK17: omp_offload.cont32: +// CHECK17-NEXT: [[TMP89:%.*]] = load i32, i32* [[N]], align 4 +// CHECK17-NEXT: [[CONV34:%.*]] = bitcast i64* [[N_CASTED33]] to i32* +// CHECK17-NEXT: store i32 [[TMP89]], i32* [[CONV34]], align 4 +// CHECK17-NEXT: [[TMP90:%.*]] = load i64, i64* [[N_CASTED33]], align 8 +// CHECK17-NEXT: [[TMP91:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK17-NEXT: [[TMP92:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS35]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP93:%.*]] = bitcast i8** [[TMP92]] to i64* +// CHECK17-NEXT: store i64 [[TMP90]], i64* [[TMP93]], align 8 +// CHECK17-NEXT: [[TMP94:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS36]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP95:%.*]] = bitcast i8** [[TMP94]] to i64* +// CHECK17-NEXT: store i64 [[TMP90]], i64* [[TMP95]], align 8 +// CHECK17-NEXT: [[TMP96:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS37]], i64 0, i64 0 +// CHECK17-NEXT: store i8* null, i8** [[TMP96]], align 8 +// CHECK17-NEXT: [[TMP97:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS35]], i32 0, i32 1 +// CHECK17-NEXT: [[TMP98:%.*]] = bitcast i8** [[TMP97]] to i64* +// CHECK17-NEXT: store i64 [[TMP1]], i64* [[TMP98]], align 8 +// CHECK17-NEXT: [[TMP99:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS36]], i32 0, i32 1 +// CHECK17-NEXT: [[TMP100:%.*]] = bitcast i8** [[TMP99]] to i64* +// CHECK17-NEXT: store i64 [[TMP1]], i64* [[TMP100]], align 8 +// CHECK17-NEXT: [[TMP101:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS37]], i64 0, i64 1 +// CHECK17-NEXT: store i8* null, i8** [[TMP101]], align 8 +// CHECK17-NEXT: [[TMP102:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS35]], i32 0, i32 2 +// CHECK17-NEXT: [[TMP103:%.*]] = bitcast i8** [[TMP102]] to i32** +// CHECK17-NEXT: store i32* [[VLA]], i32** [[TMP103]], align 8 +// CHECK17-NEXT: [[TMP104:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS36]], i32 0, i32 2 +// CHECK17-NEXT: [[TMP105:%.*]] = bitcast i8** [[TMP104]] to i32** +// CHECK17-NEXT: store i32* [[VLA]], i32** [[TMP105]], align 8 +// CHECK17-NEXT: store i64 [[TMP91]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.12, i32 0, i32 2), 
align 8 +// CHECK17-NEXT: [[TMP106:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS37]], i64 0, i64 2 +// CHECK17-NEXT: store i8* null, i8** [[TMP106]], align 8 +// CHECK17-NEXT: [[TMP107:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS35]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP108:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS36]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP109:%.*]] = load i32, i32* [[N]], align 4 +// CHECK17-NEXT: store i32 [[TMP109]], i32* [[DOTCAPTURE_EXPR_39]], align 4 +// CHECK17-NEXT: [[TMP110:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_39]], align 4 +// CHECK17-NEXT: [[SUB41:%.*]] = sub nsw i32 [[TMP110]], 0 +// CHECK17-NEXT: [[DIV42:%.*]] = sdiv i32 [[SUB41]], 1 +// CHECK17-NEXT: [[SUB43:%.*]] = sub nsw i32 [[DIV42]], 1 +// CHECK17-NEXT: store i32 [[SUB43]], i32* [[DOTCAPTURE_EXPR_40]], align 4 +// CHECK17-NEXT: [[TMP111:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_40]], align 4 +// CHECK17-NEXT: [[ADD44:%.*]] = add nsw i32 [[TMP111]], 1 +// CHECK17-NEXT: [[TMP112:%.*]] = zext i32 [[ADD44]] to i64 +// CHECK17-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP112]]) +// CHECK17-NEXT: [[TMP113:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151.region_id, i32 3, i8** [[TMP107]], i8** [[TMP108]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK17-NEXT: [[TMP114:%.*]] = icmp ne i32 [[TMP113]], 0 +// CHECK17-NEXT: br i1 [[TMP114]], label [[OMP_OFFLOAD_FAILED45:%.*]], label [[OMP_OFFLOAD_CONT46:%.*]] +// CHECK17: omp_offload.failed45: +// CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151(i64 [[TMP90]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] +// CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT46]] +// CHECK17: omp_offload.cont46: +// CHECK17-NEXT: [[TMP115:%.*]] = load i32, i32* [[M]], align 4 +// CHECK17-NEXT: store i32 [[TMP115]], i32* [[DOTCAPTURE_EXPR_47]], align 4 +// CHECK17-NEXT: [[TMP116:%.*]] = load i32, i32* [[N]], align 4 +// CHECK17-NEXT: [[CONV49:%.*]] = bitcast i64* [[N_CASTED48]] to i32* +// CHECK17-NEXT: store i32 [[TMP116]], i32* [[CONV49]], align 4 +// CHECK17-NEXT: [[TMP117:%.*]] = load i64, i64* [[N_CASTED48]], align 8 +// CHECK17-NEXT: [[TMP118:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_47]], align 4 +// CHECK17-NEXT: [[CONV51:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED50]] to i32* +// CHECK17-NEXT: store i32 [[TMP118]], i32* [[CONV51]], align 4 +// CHECK17-NEXT: [[TMP119:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED50]], align 8 +// CHECK17-NEXT: [[TMP120:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK17-NEXT: [[TMP121:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS52]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP122:%.*]] = bitcast i8** [[TMP121]] to i64* +// CHECK17-NEXT: store i64 [[TMP117]], i64* [[TMP122]], align 8 +// CHECK17-NEXT: [[TMP123:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS53]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP124:%.*]] = bitcast i8** [[TMP123]] to i64* +// CHECK17-NEXT: store i64 [[TMP117]], i64* [[TMP124]], align 8 +// CHECK17-NEXT: [[TMP125:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS54]], i64 0, i64 0 +// CHECK17-NEXT: store i8* null, i8** [[TMP125]], align 8 +// 
CHECK17-NEXT: [[TMP126:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS52]], i32 0, i32 1 +// CHECK17-NEXT: [[TMP127:%.*]] = bitcast i8** [[TMP126]] to i64* +// CHECK17-NEXT: store i64 [[TMP1]], i64* [[TMP127]], align 8 +// CHECK17-NEXT: [[TMP128:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS53]], i32 0, i32 1 +// CHECK17-NEXT: [[TMP129:%.*]] = bitcast i8** [[TMP128]] to i64* +// CHECK17-NEXT: store i64 [[TMP1]], i64* [[TMP129]], align 8 +// CHECK17-NEXT: [[TMP130:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS54]], i64 0, i64 1 +// CHECK17-NEXT: store i8* null, i8** [[TMP130]], align 8 +// CHECK17-NEXT: [[TMP131:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS52]], i32 0, i32 2 +// CHECK17-NEXT: [[TMP132:%.*]] = bitcast i8** [[TMP131]] to i32** +// CHECK17-NEXT: store i32* [[VLA]], i32** [[TMP132]], align 8 +// CHECK17-NEXT: [[TMP133:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS53]], i32 0, i32 2 +// CHECK17-NEXT: [[TMP134:%.*]] = bitcast i8** [[TMP133]] to i32** +// CHECK17-NEXT: store i32* [[VLA]], i32** [[TMP134]], align 8 +// CHECK17-NEXT: store i64 [[TMP120]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.16, i32 0, i32 2), align 8 +// CHECK17-NEXT: [[TMP135:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS54]], i64 0, i64 2 +// CHECK17-NEXT: store i8* null, i8** [[TMP135]], align 8 +// CHECK17-NEXT: [[TMP136:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS52]], i32 0, i32 3 +// CHECK17-NEXT: [[TMP137:%.*]] = bitcast i8** [[TMP136]] to i64* +// CHECK17-NEXT: store i64 [[TMP119]], i64* [[TMP137]], align 8 +// CHECK17-NEXT: [[TMP138:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS53]], i32 0, i32 3 // CHECK17-NEXT: [[TMP139:%.*]] = bitcast i8** [[TMP138]] to i64* -// CHECK17-NEXT: store i64 [[TMP134]], i64* [[TMP139]], align 8 -// CHECK17-NEXT: [[TMP140:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS56]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP141:%.*]] = bitcast i8** [[TMP140]] to i64* -// CHECK17-NEXT: store i64 [[TMP134]], i64* [[TMP141]], align 8 -// CHECK17-NEXT: [[TMP142:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES58]], i32 0, i32 0 -// CHECK17-NEXT: store i64 4, i64* [[TMP142]], align 8 -// CHECK17-NEXT: [[TMP143:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS57]], i64 0, i64 0 -// CHECK17-NEXT: store i8* null, i8** [[TMP143]], align 8 -// CHECK17-NEXT: [[TMP144:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS55]], i32 0, i32 1 -// CHECK17-NEXT: [[TMP145:%.*]] = bitcast i8** [[TMP144]] to i64* -// CHECK17-NEXT: store i64 [[TMP1]], i64* [[TMP145]], align 8 -// CHECK17-NEXT: [[TMP146:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS56]], i32 0, i32 1 -// CHECK17-NEXT: [[TMP147:%.*]] = bitcast i8** [[TMP146]] to i64* -// CHECK17-NEXT: store i64 [[TMP1]], i64* [[TMP147]], align 8 -// CHECK17-NEXT: [[TMP148:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES58]], i32 0, i32 1 -// CHECK17-NEXT: store i64 8, i64* [[TMP148]], align 8 -// CHECK17-NEXT: [[TMP149:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS57]], i64 0, i64 1 -// CHECK17-NEXT: store i8* null, i8** [[TMP149]], align 8 -// CHECK17-NEXT: [[TMP150:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS55]], i32 0, i32 2 -// 
CHECK17-NEXT: [[TMP151:%.*]] = bitcast i8** [[TMP150]] to i32** -// CHECK17-NEXT: store i32* [[VLA]], i32** [[TMP151]], align 8 -// CHECK17-NEXT: [[TMP152:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS56]], i32 0, i32 2 -// CHECK17-NEXT: [[TMP153:%.*]] = bitcast i8** [[TMP152]] to i32** -// CHECK17-NEXT: store i32* [[VLA]], i32** [[TMP153]], align 8 -// CHECK17-NEXT: [[TMP154:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES58]], i32 0, i32 2 -// CHECK17-NEXT: store i64 [[TMP137]], i64* [[TMP154]], align 8 -// CHECK17-NEXT: [[TMP155:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS57]], i64 0, i64 2 -// CHECK17-NEXT: store i8* null, i8** [[TMP155]], align 8 -// CHECK17-NEXT: [[TMP156:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS55]], i32 0, i32 3 -// CHECK17-NEXT: [[TMP157:%.*]] = bitcast i8** [[TMP156]] to i64* -// CHECK17-NEXT: store i64 [[TMP136]], i64* [[TMP157]], align 8 -// CHECK17-NEXT: [[TMP158:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS56]], i32 0, i32 3 -// CHECK17-NEXT: [[TMP159:%.*]] = bitcast i8** [[TMP158]] to i64* -// CHECK17-NEXT: store i64 [[TMP136]], i64* [[TMP159]], align 8 -// CHECK17-NEXT: [[TMP160:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES58]], i32 0, i32 3 -// CHECK17-NEXT: store i64 4, i64* [[TMP160]], align 8 -// CHECK17-NEXT: [[TMP161:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS57]], i64 0, i64 3 -// CHECK17-NEXT: store i8* null, i8** [[TMP161]], align 8 -// CHECK17-NEXT: [[TMP162:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS55]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP163:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS56]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP164:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES58]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP165:%.*]] = load i32, i32* [[N]], align 4 -// CHECK17-NEXT: store i32 [[TMP165]], i32* [[DOTCAPTURE_EXPR_60]], align 4 -// CHECK17-NEXT: [[TMP166:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_60]], align 4 -// CHECK17-NEXT: [[SUB62:%.*]] = sub nsw i32 [[TMP166]], 0 -// CHECK17-NEXT: [[DIV63:%.*]] = sdiv i32 [[SUB62]], 1 -// CHECK17-NEXT: [[SUB64:%.*]] = sub nsw i32 [[DIV63]], 1 -// CHECK17-NEXT: store i32 [[SUB64]], i32* [[DOTCAPTURE_EXPR_61]], align 4 -// CHECK17-NEXT: [[TMP167:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_61]], align 4 -// CHECK17-NEXT: [[ADD65:%.*]] = add nsw i32 [[TMP167]], 1 -// CHECK17-NEXT: [[TMP168:%.*]] = zext i32 [[ADD65]] to i64 -// CHECK17-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP168]]) -// CHECK17-NEXT: [[TMP169:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155.region_id, i32 4, i8** [[TMP162]], i8** [[TMP163]], i64* [[TMP164]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK17-NEXT: [[TMP170:%.*]] = icmp ne i32 [[TMP169]], 0 -// CHECK17-NEXT: br i1 [[TMP170]], label [[OMP_OFFLOAD_FAILED66:%.*]], label [[OMP_OFFLOAD_CONT67:%.*]] -// CHECK17: omp_offload.failed66: -// CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155(i64 [[TMP134]], i64 [[TMP1]], i32* [[VLA]], i64 [[TMP136]]) #[[ATTR3]] -// CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT67]] -// CHECK17: omp_offload.cont67: -// CHECK17-NEXT: 
[[TMP171:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 -// CHECK17-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP171]]) +// CHECK17-NEXT: store i64 [[TMP119]], i64* [[TMP139]], align 8 +// CHECK17-NEXT: [[TMP140:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS54]], i64 0, i64 3 +// CHECK17-NEXT: store i8* null, i8** [[TMP140]], align 8 +// CHECK17-NEXT: [[TMP141:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS52]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP142:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS53]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP143:%.*]] = load i32, i32* [[N]], align 4 +// CHECK17-NEXT: store i32 [[TMP143]], i32* [[DOTCAPTURE_EXPR_56]], align 4 +// CHECK17-NEXT: [[TMP144:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_56]], align 4 +// CHECK17-NEXT: [[SUB58:%.*]] = sub nsw i32 [[TMP144]], 0 +// CHECK17-NEXT: [[DIV59:%.*]] = sdiv i32 [[SUB58]], 1 +// CHECK17-NEXT: [[SUB60:%.*]] = sub nsw i32 [[DIV59]], 1 +// CHECK17-NEXT: store i32 [[SUB60]], i32* [[DOTCAPTURE_EXPR_57]], align 4 +// CHECK17-NEXT: [[TMP145:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_57]], align 4 +// CHECK17-NEXT: [[ADD61:%.*]] = add nsw i32 [[TMP145]], 1 +// CHECK17-NEXT: [[TMP146:%.*]] = zext i32 [[ADD61]] to i64 +// CHECK17-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP146]]) +// CHECK17-NEXT: [[TMP147:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155.region_id, i32 4, i8** [[TMP141]], i8** [[TMP142]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.16, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK17-NEXT: [[TMP148:%.*]] = icmp ne i32 [[TMP147]], 0 +// CHECK17-NEXT: br i1 [[TMP148]], label [[OMP_OFFLOAD_FAILED62:%.*]], label [[OMP_OFFLOAD_CONT63:%.*]] +// CHECK17: omp_offload.failed62: +// CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155(i64 [[TMP117]], i64 [[TMP1]], i32* [[VLA]], i64 [[TMP119]]) #[[ATTR3]] +// CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT63]] +// CHECK17: omp_offload.cont63: +// CHECK17-NEXT: [[TMP149:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 +// CHECK17-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP149]]) // CHECK17-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK17-NEXT: [[TMP172:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK17-NEXT: call void @llvm.stackrestore(i8* [[TMP172]]) -// CHECK17-NEXT: [[TMP173:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK17-NEXT: ret i32 [[TMP173]] +// CHECK17-NEXT: [[TMP150:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK17-NEXT: call void @llvm.stackrestore(i8* [[TMP150]]) +// CHECK17-NEXT: [[TMP151:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK17-NEXT: ret i32 [[TMP151]] // // // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139 @@ -18125,11 +17930,11 @@ // CHECK17-NEXT: [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK17-NEXT: store i32 [[TMP4]], i32* [[CONV3]], align 4 // CHECK17-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP5]]) +// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP5]]) // CHECK17-NEXT: ret void // // -// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..5 +// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..6 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -18216,7 +18021,7 @@ // CHECK17-NEXT: [[CONV9:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK17-NEXT: store i32 [[TMP22]], i32* [[CONV9]], align 4 // CHECK17-NEXT: [[TMP23:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*, i64)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i64 [[TMP17]], i64 [[TMP19]], i64 [[TMP21]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP23]]) +// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*, i64)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i64 [[TMP17]], i64 [[TMP19]], i64 [[TMP21]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP23]]) // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK17: omp.inner.for.inc: // CHECK17-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -18258,7 +18063,7 @@ // CHECK17-NEXT: ret void // // -// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..6 +// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..7 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -18383,11 +18188,11 @@ // CHECK17-NEXT: [[CONV1:%.*]] = bitcast i64* [[N_CASTED]] to i32* // CHECK17-NEXT: store i32 [[TMP2]], i32* [[CONV1]], align 4 // CHECK17-NEXT: [[TMP3:%.*]] = load i64, i64* [[N_CASTED]], align 8 -// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*)* @.omp_outlined..8 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]]) +// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]]) // CHECK17-NEXT: ret void // // -// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..8 +// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..10 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -18464,7 +18269,7 @@ // CHECK17-NEXT: [[CONV6:%.*]] = bitcast i64* [[N_CASTED]] to i32* // CHECK17-NEXT: store i32 [[TMP19]], i32* [[CONV6]], align 4 // CHECK17-NEXT: [[TMP20:%.*]] = load i64, i64* [[N_CASTED]], align 8 -// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), i64 [[TMP16]], i64 [[TMP18]], i64 [[TMP20]], i64 [[TMP0]], i32* [[TMP1]]) +// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP16]], i64 [[TMP18]], i64 [[TMP20]], i64 [[TMP0]], i32* [[TMP1]]) // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK17: omp.inner.for.inc: // CHECK17-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -18483,7 +18288,7 @@ // CHECK17-NEXT: ret void // // -// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..9 +// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..11 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -18609,11 +18414,11 @@ // CHECK17-NEXT: [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK17-NEXT: store i32 [[TMP4]], i32* [[CONV3]], align 4 // CHECK17-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP5]]) +// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP5]]) // CHECK17-NEXT: ret void // // -// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..14 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -18698,7 +18503,7 @@ // CHECK17-NEXT: [[CONV9:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK17-NEXT: store i32 [[TMP21]], i32* [[CONV9]], align 4 // CHECK17-NEXT: [[TMP22:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*, i64)* @.omp_outlined..12 to void (i32*, i32*, ...)*), i64 [[TMP16]], i64 [[TMP18]], i64 [[TMP20]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP22]]) +// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*, i64)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i64 [[TMP16]], i64 [[TMP18]], i64 [[TMP20]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP22]]) // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK17: omp.inner.for.inc: // CHECK17-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -18717,7 +18522,7 @@ // CHECK17-NEXT: ret void // // -// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..12 +// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..15 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -18865,7 +18670,7 @@ // CHECK17-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK17-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK17-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK17-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l112.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK17-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l112.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.20, i32 0, i32 0), i64* 
getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.21, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK17-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK17-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK17: omp_offload.failed: @@ -18883,7 +18688,7 @@ // CHECK17-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0 // CHECK17-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0 // CHECK17-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK17-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.19, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.20, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK17-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.24, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.25, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK17-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 // CHECK17-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]] // CHECK17: omp_offload.failed5: @@ -18915,7 +18720,7 @@ // CHECK17-NEXT: [[TMP31:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0 // CHECK17-NEXT: [[TMP32:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0 // CHECK17-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK17-NEXT: [[TMP33:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l120.region_id, i32 2, i8** [[TMP31]], i8** [[TMP32]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.23, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.24, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK17-NEXT: [[TMP33:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l120.region_id, i32 2, i8** [[TMP31]], i8** [[TMP32]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.28, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.29, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK17-NEXT: [[TMP34:%.*]] = icmp ne i32 [[TMP33]], 0 // CHECK17-NEXT: br i1 [[TMP34]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]] // CHECK17: omp_offload.failed11: @@ -18933,7 +18738,7 @@ // CHECK17-NEXT: [[TMP40:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 0 // CHECK17-NEXT: [[TMP41:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 0 // CHECK17-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// 
CHECK17-NEXT: [[TMP42:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l124.region_id, i32 1, i8** [[TMP40]], i8** [[TMP41]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.27, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.28, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK17-NEXT: [[TMP42:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l124.region_id, i32 1, i8** [[TMP40]], i8** [[TMP41]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.32, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.33, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK17-NEXT: [[TMP43:%.*]] = icmp ne i32 [[TMP42]], 0 // CHECK17-NEXT: br i1 [[TMP43]], label [[OMP_OFFLOAD_FAILED17:%.*]], label [[OMP_OFFLOAD_CONT18:%.*]] // CHECK17: omp_offload.failed17: @@ -18965,7 +18770,7 @@ // CHECK17-NEXT: [[TMP57:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 0 // CHECK17-NEXT: [[TMP58:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 0 // CHECK17-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK17-NEXT: [[TMP59:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l128.region_id, i32 2, i8** [[TMP57]], i8** [[TMP58]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.31, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.32, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK17-NEXT: [[TMP59:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l128.region_id, i32 2, i8** [[TMP57]], i8** [[TMP58]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.36, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.37, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK17-NEXT: [[TMP60:%.*]] = icmp ne i32 [[TMP59]], 0 // CHECK17-NEXT: br i1 [[TMP60]], label [[OMP_OFFLOAD_FAILED26:%.*]], label [[OMP_OFFLOAD_CONT27:%.*]] // CHECK17: omp_offload.failed26: @@ -18981,11 +18786,11 @@ // CHECK17-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK17-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 // CHECK17-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 -// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK17-NEXT: ret void // // -// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..14 +// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..18 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -19033,7 +18838,7 @@ // CHECK17-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 // CHECK17-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 // CHECK17-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]) +// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..19 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]) // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK17: omp.inner.for.inc: // CHECK17-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -19048,7 +18853,7 @@ // CHECK17-NEXT: ret void // // -// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..15 +// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..19 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -19131,11 +18936,11 @@ // CHECK17-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK17-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 // CHECK17-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 -// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..17 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..22 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK17-NEXT: ret void // // -// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..17 +// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..22 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -19183,7 +18988,7 @@ // CHECK17-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 // CHECK17-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 // CHECK17-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]) +// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..23 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]) // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK17: omp.inner.for.inc: // CHECK17-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -19198,7 +19003,7 @@ // CHECK17-NEXT: ret void // // -// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..18 +// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..23 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -19289,11 +19094,11 @@ // CHECK17-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK17-NEXT: store i32 [[TMP1]], i32* [[CONV1]], align 4 // CHECK17-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..21 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP2]]) +// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..26 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP2]]) // CHECK17-NEXT: ret void // // -// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..21 +// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..26 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -19349,7 +19154,7 @@ // CHECK17-NEXT: [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK17-NEXT: store i32 [[TMP12]], i32* [[CONV2]], align 4 // CHECK17-NEXT: [[TMP13:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..22 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]]) +// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..27 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]]) // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK17: omp.inner.for.inc: // CHECK17-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -19364,7 +19169,7 @@ // CHECK17-NEXT: ret void // // -// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..22 +// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..27 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -19472,11 +19277,11 @@ // CHECK17-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK17-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 // CHECK17-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 -// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..25 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..30 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK17-NEXT: ret void // // -// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..25 +// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..30 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -19524,7 +19329,7 @@ // CHECK17-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 // CHECK17-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 // CHECK17-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..26 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]) +// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..31 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]) // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK17: omp.inner.for.inc: // CHECK17-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -19539,7 +19344,7 @@ // CHECK17-NEXT: ret void // // -// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..26 +// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..31 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -19628,11 +19433,11 @@ // CHECK17-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK17-NEXT: store i32 [[TMP1]], i32* [[CONV1]], align 4 // CHECK17-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..29 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP2]]) +// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..34 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP2]]) // CHECK17-NEXT: ret void // // -// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..29 +// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..34 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -19688,7 +19493,7 @@ // CHECK17-NEXT: [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK17-NEXT: store i32 [[TMP12]], i32* [[CONV2]], align 4 // CHECK17-NEXT: [[TMP13:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..30 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]]) +// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..35 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]]) // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK17: omp.inner.for.inc: // CHECK17-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -19703,7 +19508,7 @@ // CHECK17-NEXT: ret void // // -// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..30 +// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..35 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -19803,7 +19608,6 @@ // CHECK18-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK18-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK18-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8 -// CHECK18-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 8 // CHECK18-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK18-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK18-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -19811,38 +19615,34 @@ // CHECK18-NEXT: [[DOTOFFLOAD_BASEPTRS5:%.*]] = alloca [3 x i8*], align 8 // CHECK18-NEXT: [[DOTOFFLOAD_PTRS6:%.*]] = alloca [3 x i8*], align 8 // CHECK18-NEXT: [[DOTOFFLOAD_MAPPERS7:%.*]] = alloca [3 x i8*], align 8 -// CHECK18-NEXT: [[DOTOFFLOAD_SIZES8:%.*]] = alloca [3 x i64], align 8 -// CHECK18-NEXT: [[_TMP9:%.*]] = alloca i32, align 4 +// CHECK18-NEXT: [[_TMP8:%.*]] = alloca i32, align 4 +// CHECK18-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4 // CHECK18-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4 -// CHECK18-NEXT: [[DOTCAPTURE_EXPR_11:%.*]] = alloca i32, align 4 -// CHECK18-NEXT: [[DOTCAPTURE_EXPR_18:%.*]] = alloca i32, align 4 -// CHECK18-NEXT: [[N_CASTED19:%.*]] = alloca i64, align 8 +// CHECK18-NEXT: 
[[DOTCAPTURE_EXPR_17:%.*]] = alloca i32, align 4 +// CHECK18-NEXT: [[N_CASTED18:%.*]] = alloca i64, align 8 // CHECK18-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8 -// CHECK18-NEXT: [[DOTOFFLOAD_BASEPTRS22:%.*]] = alloca [4 x i8*], align 8 -// CHECK18-NEXT: [[DOTOFFLOAD_PTRS23:%.*]] = alloca [4 x i8*], align 8 -// CHECK18-NEXT: [[DOTOFFLOAD_MAPPERS24:%.*]] = alloca [4 x i8*], align 8 -// CHECK18-NEXT: [[DOTOFFLOAD_SIZES25:%.*]] = alloca [4 x i64], align 8 -// CHECK18-NEXT: [[_TMP26:%.*]] = alloca i32, align 4 -// CHECK18-NEXT: [[DOTCAPTURE_EXPR_27:%.*]] = alloca i32, align 4 -// CHECK18-NEXT: [[DOTCAPTURE_EXPR_28:%.*]] = alloca i32, align 4 -// CHECK18-NEXT: [[N_CASTED35:%.*]] = alloca i64, align 8 -// CHECK18-NEXT: [[DOTOFFLOAD_BASEPTRS37:%.*]] = alloca [3 x i8*], align 8 -// CHECK18-NEXT: [[DOTOFFLOAD_PTRS38:%.*]] = alloca [3 x i8*], align 8 -// CHECK18-NEXT: [[DOTOFFLOAD_MAPPERS39:%.*]] = alloca [3 x i8*], align 8 -// CHECK18-NEXT: [[DOTOFFLOAD_SIZES40:%.*]] = alloca [3 x i64], align 8 -// CHECK18-NEXT: [[_TMP41:%.*]] = alloca i32, align 4 -// CHECK18-NEXT: [[DOTCAPTURE_EXPR_42:%.*]] = alloca i32, align 4 -// CHECK18-NEXT: [[DOTCAPTURE_EXPR_43:%.*]] = alloca i32, align 4 -// CHECK18-NEXT: [[DOTCAPTURE_EXPR_50:%.*]] = alloca i32, align 4 -// CHECK18-NEXT: [[N_CASTED51:%.*]] = alloca i64, align 8 -// CHECK18-NEXT: [[DOTCAPTURE_EXPR__CASTED53:%.*]] = alloca i64, align 8 -// CHECK18-NEXT: [[DOTOFFLOAD_BASEPTRS55:%.*]] = alloca [4 x i8*], align 8 -// CHECK18-NEXT: [[DOTOFFLOAD_PTRS56:%.*]] = alloca [4 x i8*], align 8 -// CHECK18-NEXT: [[DOTOFFLOAD_MAPPERS57:%.*]] = alloca [4 x i8*], align 8 -// CHECK18-NEXT: [[DOTOFFLOAD_SIZES58:%.*]] = alloca [4 x i64], align 8 -// CHECK18-NEXT: [[_TMP59:%.*]] = alloca i32, align 4 -// CHECK18-NEXT: [[DOTCAPTURE_EXPR_60:%.*]] = alloca i32, align 4 -// CHECK18-NEXT: [[DOTCAPTURE_EXPR_61:%.*]] = alloca i32, align 4 +// CHECK18-NEXT: [[DOTOFFLOAD_BASEPTRS21:%.*]] = alloca [4 x i8*], align 8 +// CHECK18-NEXT: [[DOTOFFLOAD_PTRS22:%.*]] = alloca [4 x i8*], align 8 +// CHECK18-NEXT: [[DOTOFFLOAD_MAPPERS23:%.*]] = alloca [4 x i8*], align 8 +// CHECK18-NEXT: [[_TMP24:%.*]] = alloca i32, align 4 +// CHECK18-NEXT: [[DOTCAPTURE_EXPR_25:%.*]] = alloca i32, align 4 +// CHECK18-NEXT: [[DOTCAPTURE_EXPR_26:%.*]] = alloca i32, align 4 +// CHECK18-NEXT: [[N_CASTED33:%.*]] = alloca i64, align 8 +// CHECK18-NEXT: [[DOTOFFLOAD_BASEPTRS35:%.*]] = alloca [3 x i8*], align 8 +// CHECK18-NEXT: [[DOTOFFLOAD_PTRS36:%.*]] = alloca [3 x i8*], align 8 +// CHECK18-NEXT: [[DOTOFFLOAD_MAPPERS37:%.*]] = alloca [3 x i8*], align 8 +// CHECK18-NEXT: [[_TMP38:%.*]] = alloca i32, align 4 +// CHECK18-NEXT: [[DOTCAPTURE_EXPR_39:%.*]] = alloca i32, align 4 +// CHECK18-NEXT: [[DOTCAPTURE_EXPR_40:%.*]] = alloca i32, align 4 +// CHECK18-NEXT: [[DOTCAPTURE_EXPR_47:%.*]] = alloca i32, align 4 +// CHECK18-NEXT: [[N_CASTED48:%.*]] = alloca i64, align 8 +// CHECK18-NEXT: [[DOTCAPTURE_EXPR__CASTED50:%.*]] = alloca i64, align 8 +// CHECK18-NEXT: [[DOTOFFLOAD_BASEPTRS52:%.*]] = alloca [4 x i8*], align 8 +// CHECK18-NEXT: [[DOTOFFLOAD_PTRS53:%.*]] = alloca [4 x i8*], align 8 +// CHECK18-NEXT: [[DOTOFFLOAD_MAPPERS54:%.*]] = alloca [4 x i8*], align 8 +// CHECK18-NEXT: [[_TMP55:%.*]] = alloca i32, align 4 +// CHECK18-NEXT: [[DOTCAPTURE_EXPR_56:%.*]] = alloca i32, align 4 +// CHECK18-NEXT: [[DOTCAPTURE_EXPR_57:%.*]] = alloca i32, align 4 // CHECK18-NEXT: store i32 0, i32* [[RETVAL]], align 4 // CHECK18-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 // CHECK18-NEXT: store i8** [[ARGV]], i8*** 
[[ARGV_ADDR]], align 8 @@ -19865,314 +19665,280 @@ // CHECK18-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK18-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i64* // CHECK18-NEXT: store i64 [[TMP4]], i64* [[TMP9]], align 8 -// CHECK18-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK18-NEXT: store i64 4, i64* [[TMP10]], align 8 -// CHECK18-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK18-NEXT: store i8* null, i8** [[TMP11]], align 8 -// CHECK18-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK18-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64* -// CHECK18-NEXT: store i64 [[TMP1]], i64* [[TMP13]], align 8 -// CHECK18-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK18-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64* -// CHECK18-NEXT: store i64 [[TMP1]], i64* [[TMP15]], align 8 -// CHECK18-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK18-NEXT: store i64 8, i64* [[TMP16]], align 8 -// CHECK18-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK18-NEXT: store i8* null, i8** [[TMP17]], align 8 -// CHECK18-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK18-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK18-NEXT: store i8* null, i8** [[TMP10]], align 8 +// CHECK18-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK18-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i64* +// CHECK18-NEXT: store i64 [[TMP1]], i64* [[TMP12]], align 8 +// CHECK18-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK18-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i64* +// CHECK18-NEXT: store i64 [[TMP1]], i64* [[TMP14]], align 8 +// CHECK18-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK18-NEXT: store i8* null, i8** [[TMP15]], align 8 +// CHECK18-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK18-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** +// CHECK18-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 8 +// CHECK18-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK18-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** // CHECK18-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 8 -// CHECK18-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK18-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** -// CHECK18-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 8 -// CHECK18-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK18-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 8 -// CHECK18-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK18-NEXT: store i8* null, i8** [[TMP23]], align 8 -// CHECK18-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x 
i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4 -// CHECK18-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK18-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK18-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 +// CHECK18-NEXT: store i64 [[TMP5]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 2), align 8 +// CHECK18-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK18-NEXT: store i8* null, i8** [[TMP20]], align 8 +// CHECK18-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP23:%.*]] = load i32, i32* [[N]], align 4 +// CHECK18-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK18-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK18-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK18-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK18-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK18-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK18-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK18-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1 -// CHECK18-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 -// CHECK18-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP30]]) -// CHECK18-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK18-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 -// CHECK18-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK18-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK18-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], 1 +// CHECK18-NEXT: [[TMP26:%.*]] = zext i32 [[ADD]] to i64 +// CHECK18-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP26]]) +// CHECK18-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK18-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK18-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK18: omp_offload.failed: // CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139(i64 [[TMP4]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK18-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK18: omp_offload.cont: -// CHECK18-NEXT: [[TMP33:%.*]] = load i32, i32* 
[[N]], align 4 +// CHECK18-NEXT: [[TMP29:%.*]] = load i32, i32* [[N]], align 4 // CHECK18-NEXT: [[CONV4:%.*]] = bitcast i64* [[N_CASTED3]] to i32* -// CHECK18-NEXT: store i32 [[TMP33]], i32* [[CONV4]], align 4 -// CHECK18-NEXT: [[TMP34:%.*]] = load i64, i64* [[N_CASTED3]], align 8 -// CHECK18-NEXT: [[TMP35:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK18-NEXT: [[TMP36:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i64* -// CHECK18-NEXT: store i64 [[TMP34]], i64* [[TMP37]], align 8 -// CHECK18-NEXT: [[TMP38:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i64* -// CHECK18-NEXT: store i64 [[TMP34]], i64* [[TMP39]], align 8 -// CHECK18-NEXT: [[TMP40:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 0 -// CHECK18-NEXT: store i64 4, i64* [[TMP40]], align 8 -// CHECK18-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 0 +// CHECK18-NEXT: store i32 [[TMP29]], i32* [[CONV4]], align 4 +// CHECK18-NEXT: [[TMP30:%.*]] = load i64, i64* [[N_CASTED3]], align 8 +// CHECK18-NEXT: [[TMP31:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK18-NEXT: [[TMP32:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i64* +// CHECK18-NEXT: store i64 [[TMP30]], i64* [[TMP33]], align 8 +// CHECK18-NEXT: [[TMP34:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i64* +// CHECK18-NEXT: store i64 [[TMP30]], i64* [[TMP35]], align 8 +// CHECK18-NEXT: [[TMP36:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 0 +// CHECK18-NEXT: store i8* null, i8** [[TMP36]], align 8 +// CHECK18-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1 +// CHECK18-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i64* +// CHECK18-NEXT: store i64 [[TMP1]], i64* [[TMP38]], align 8 +// CHECK18-NEXT: [[TMP39:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 1 +// CHECK18-NEXT: [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i64* +// CHECK18-NEXT: store i64 [[TMP1]], i64* [[TMP40]], align 8 +// CHECK18-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 1 // CHECK18-NEXT: store i8* null, i8** [[TMP41]], align 8 -// CHECK18-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1 -// CHECK18-NEXT: [[TMP43:%.*]] = bitcast i8** [[TMP42]] to i64* -// CHECK18-NEXT: store i64 [[TMP1]], i64* [[TMP43]], align 8 -// CHECK18-NEXT: [[TMP44:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 1 -// CHECK18-NEXT: [[TMP45:%.*]] = bitcast i8** [[TMP44]] to i64* -// CHECK18-NEXT: store i64 [[TMP1]], i64* [[TMP45]], align 8 -// CHECK18-NEXT: [[TMP46:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 1 -// CHECK18-NEXT: store i64 8, i64* [[TMP46]], align 8 -// CHECK18-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 1 -// CHECK18-NEXT: store i8* null, i8** [[TMP47]], align 8 -// CHECK18-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* 
[[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 2 -// CHECK18-NEXT: [[TMP49:%.*]] = bitcast i8** [[TMP48]] to i32** -// CHECK18-NEXT: store i32* [[VLA]], i32** [[TMP49]], align 8 -// CHECK18-NEXT: [[TMP50:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 2 -// CHECK18-NEXT: [[TMP51:%.*]] = bitcast i8** [[TMP50]] to i32** -// CHECK18-NEXT: store i32* [[VLA]], i32** [[TMP51]], align 8 -// CHECK18-NEXT: [[TMP52:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 2 -// CHECK18-NEXT: store i64 [[TMP35]], i64* [[TMP52]], align 8 -// CHECK18-NEXT: [[TMP53:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 2 -// CHECK18-NEXT: store i8* null, i8** [[TMP53]], align 8 -// CHECK18-NEXT: [[TMP54:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP55:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP56:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP57:%.*]] = load i32, i32* [[N]], align 4 -// CHECK18-NEXT: store i32 [[TMP57]], i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK18-NEXT: [[TMP58:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK18-NEXT: [[SUB12:%.*]] = sub nsw i32 [[TMP58]], 0 -// CHECK18-NEXT: [[DIV13:%.*]] = sdiv i32 [[SUB12]], 1 -// CHECK18-NEXT: [[SUB14:%.*]] = sub nsw i32 [[DIV13]], 1 -// CHECK18-NEXT: store i32 [[SUB14]], i32* [[DOTCAPTURE_EXPR_11]], align 4 -// CHECK18-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_11]], align 4 -// CHECK18-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP59]], 1 -// CHECK18-NEXT: [[TMP60:%.*]] = zext i32 [[ADD15]] to i64 -// CHECK18-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP60]]) -// CHECK18-NEXT: [[TMP61:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143.region_id, i32 3, i8** [[TMP54]], i8** [[TMP55]], i64* [[TMP56]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK18-NEXT: [[TMP62:%.*]] = icmp ne i32 [[TMP61]], 0 -// CHECK18-NEXT: br i1 [[TMP62]], label [[OMP_OFFLOAD_FAILED16:%.*]], label [[OMP_OFFLOAD_CONT17:%.*]] -// CHECK18: omp_offload.failed16: -// CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143(i64 [[TMP34]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] -// CHECK18-NEXT: br label [[OMP_OFFLOAD_CONT17]] -// CHECK18: omp_offload.cont17: -// CHECK18-NEXT: [[TMP63:%.*]] = load i32, i32* [[M]], align 4 -// CHECK18-NEXT: store i32 [[TMP63]], i32* [[DOTCAPTURE_EXPR_18]], align 4 -// CHECK18-NEXT: [[TMP64:%.*]] = load i32, i32* [[N]], align 4 -// CHECK18-NEXT: [[CONV20:%.*]] = bitcast i64* [[N_CASTED19]] to i32* -// CHECK18-NEXT: store i32 [[TMP64]], i32* [[CONV20]], align 4 -// CHECK18-NEXT: [[TMP65:%.*]] = load i64, i64* [[N_CASTED19]], align 8 -// CHECK18-NEXT: [[TMP66:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_18]], align 4 -// CHECK18-NEXT: [[CONV21:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* -// CHECK18-NEXT: store i32 [[TMP66]], i32* [[CONV21]], align 4 -// CHECK18-NEXT: [[TMP67:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK18-NEXT: [[TMP68:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK18-NEXT: [[TMP69:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* 
[[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP70:%.*]] = bitcast i8** [[TMP69]] to i64* -// CHECK18-NEXT: store i64 [[TMP65]], i64* [[TMP70]], align 8 -// CHECK18-NEXT: [[TMP71:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP72:%.*]] = bitcast i8** [[TMP71]] to i64* -// CHECK18-NEXT: store i64 [[TMP65]], i64* [[TMP72]], align 8 -// CHECK18-NEXT: [[TMP73:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 0 -// CHECK18-NEXT: store i64 4, i64* [[TMP73]], align 8 -// CHECK18-NEXT: [[TMP74:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 0 -// CHECK18-NEXT: store i8* null, i8** [[TMP74]], align 8 -// CHECK18-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 1 -// CHECK18-NEXT: [[TMP76:%.*]] = bitcast i8** [[TMP75]] to i64* -// CHECK18-NEXT: store i64 [[TMP1]], i64* [[TMP76]], align 8 -// CHECK18-NEXT: [[TMP77:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 1 -// CHECK18-NEXT: [[TMP78:%.*]] = bitcast i8** [[TMP77]] to i64* -// CHECK18-NEXT: store i64 [[TMP1]], i64* [[TMP78]], align 8 -// CHECK18-NEXT: [[TMP79:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 1 -// CHECK18-NEXT: store i64 8, i64* [[TMP79]], align 8 -// CHECK18-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 1 +// CHECK18-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 2 +// CHECK18-NEXT: [[TMP43:%.*]] = bitcast i8** [[TMP42]] to i32** +// CHECK18-NEXT: store i32* [[VLA]], i32** [[TMP43]], align 8 +// CHECK18-NEXT: [[TMP44:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 2 +// CHECK18-NEXT: [[TMP45:%.*]] = bitcast i8** [[TMP44]] to i32** +// CHECK18-NEXT: store i32* [[VLA]], i32** [[TMP45]], align 8 +// CHECK18-NEXT: store i64 [[TMP31]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 2), align 8 +// CHECK18-NEXT: [[TMP46:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 2 +// CHECK18-NEXT: store i8* null, i8** [[TMP46]], align 8 +// CHECK18-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP49:%.*]] = load i32, i32* [[N]], align 4 +// CHECK18-NEXT: store i32 [[TMP49]], i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK18-NEXT: [[TMP50:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK18-NEXT: [[SUB11:%.*]] = sub nsw i32 [[TMP50]], 0 +// CHECK18-NEXT: [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1 +// CHECK18-NEXT: [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1 +// CHECK18-NEXT: store i32 [[SUB13]], i32* [[DOTCAPTURE_EXPR_10]], align 4 +// CHECK18-NEXT: [[TMP51:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 +// CHECK18-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP51]], 1 +// CHECK18-NEXT: [[TMP52:%.*]] = zext i32 [[ADD14]] to i64 +// CHECK18-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP52]]) +// CHECK18-NEXT: [[TMP53:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143.region_id, i32 3, i8** [[TMP47]], i8** 
[[TMP48]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK18-NEXT: [[TMP54:%.*]] = icmp ne i32 [[TMP53]], 0 +// CHECK18-NEXT: br i1 [[TMP54]], label [[OMP_OFFLOAD_FAILED15:%.*]], label [[OMP_OFFLOAD_CONT16:%.*]] +// CHECK18: omp_offload.failed15: +// CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143(i64 [[TMP30]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] +// CHECK18-NEXT: br label [[OMP_OFFLOAD_CONT16]] +// CHECK18: omp_offload.cont16: +// CHECK18-NEXT: [[TMP55:%.*]] = load i32, i32* [[M]], align 4 +// CHECK18-NEXT: store i32 [[TMP55]], i32* [[DOTCAPTURE_EXPR_17]], align 4 +// CHECK18-NEXT: [[TMP56:%.*]] = load i32, i32* [[N]], align 4 +// CHECK18-NEXT: [[CONV19:%.*]] = bitcast i64* [[N_CASTED18]] to i32* +// CHECK18-NEXT: store i32 [[TMP56]], i32* [[CONV19]], align 4 +// CHECK18-NEXT: [[TMP57:%.*]] = load i64, i64* [[N_CASTED18]], align 8 +// CHECK18-NEXT: [[TMP58:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_17]], align 4 +// CHECK18-NEXT: [[CONV20:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* +// CHECK18-NEXT: store i32 [[TMP58]], i32* [[CONV20]], align 4 +// CHECK18-NEXT: [[TMP59:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 +// CHECK18-NEXT: [[TMP60:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK18-NEXT: [[TMP61:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP62:%.*]] = bitcast i8** [[TMP61]] to i64* +// CHECK18-NEXT: store i64 [[TMP57]], i64* [[TMP62]], align 8 +// CHECK18-NEXT: [[TMP63:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP64:%.*]] = bitcast i8** [[TMP63]] to i64* +// CHECK18-NEXT: store i64 [[TMP57]], i64* [[TMP64]], align 8 +// CHECK18-NEXT: [[TMP65:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 0 +// CHECK18-NEXT: store i8* null, i8** [[TMP65]], align 8 +// CHECK18-NEXT: [[TMP66:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 1 +// CHECK18-NEXT: [[TMP67:%.*]] = bitcast i8** [[TMP66]] to i64* +// CHECK18-NEXT: store i64 [[TMP1]], i64* [[TMP67]], align 8 +// CHECK18-NEXT: [[TMP68:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 1 +// CHECK18-NEXT: [[TMP69:%.*]] = bitcast i8** [[TMP68]] to i64* +// CHECK18-NEXT: store i64 [[TMP1]], i64* [[TMP69]], align 8 +// CHECK18-NEXT: [[TMP70:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 1 +// CHECK18-NEXT: store i8* null, i8** [[TMP70]], align 8 +// CHECK18-NEXT: [[TMP71:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 2 +// CHECK18-NEXT: [[TMP72:%.*]] = bitcast i8** [[TMP71]] to i32** +// CHECK18-NEXT: store i32* [[VLA]], i32** [[TMP72]], align 8 +// CHECK18-NEXT: [[TMP73:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 2 +// CHECK18-NEXT: [[TMP74:%.*]] = bitcast i8** [[TMP73]] to i32** +// CHECK18-NEXT: store i32* [[VLA]], i32** [[TMP74]], align 8 +// CHECK18-NEXT: store i64 [[TMP60]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 2), align 8 +// CHECK18-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 2 +// CHECK18-NEXT: store i8* null, i8** [[TMP75]], align 8 +// 
CHECK18-NEXT: [[TMP76:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 3 +// CHECK18-NEXT: [[TMP77:%.*]] = bitcast i8** [[TMP76]] to i64* +// CHECK18-NEXT: store i64 [[TMP59]], i64* [[TMP77]], align 8 +// CHECK18-NEXT: [[TMP78:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 3 +// CHECK18-NEXT: [[TMP79:%.*]] = bitcast i8** [[TMP78]] to i64* +// CHECK18-NEXT: store i64 [[TMP59]], i64* [[TMP79]], align 8 +// CHECK18-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 3 // CHECK18-NEXT: store i8* null, i8** [[TMP80]], align 8 -// CHECK18-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 2 -// CHECK18-NEXT: [[TMP82:%.*]] = bitcast i8** [[TMP81]] to i32** -// CHECK18-NEXT: store i32* [[VLA]], i32** [[TMP82]], align 8 -// CHECK18-NEXT: [[TMP83:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 2 -// CHECK18-NEXT: [[TMP84:%.*]] = bitcast i8** [[TMP83]] to i32** -// CHECK18-NEXT: store i32* [[VLA]], i32** [[TMP84]], align 8 -// CHECK18-NEXT: [[TMP85:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 2 -// CHECK18-NEXT: store i64 [[TMP68]], i64* [[TMP85]], align 8 -// CHECK18-NEXT: [[TMP86:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 2 -// CHECK18-NEXT: store i8* null, i8** [[TMP86]], align 8 -// CHECK18-NEXT: [[TMP87:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 3 -// CHECK18-NEXT: [[TMP88:%.*]] = bitcast i8** [[TMP87]] to i64* -// CHECK18-NEXT: store i64 [[TMP67]], i64* [[TMP88]], align 8 -// CHECK18-NEXT: [[TMP89:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 3 -// CHECK18-NEXT: [[TMP90:%.*]] = bitcast i8** [[TMP89]] to i64* -// CHECK18-NEXT: store i64 [[TMP67]], i64* [[TMP90]], align 8 -// CHECK18-NEXT: [[TMP91:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 3 -// CHECK18-NEXT: store i64 4, i64* [[TMP91]], align 8 -// CHECK18-NEXT: [[TMP92:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 3 -// CHECK18-NEXT: store i8* null, i8** [[TMP92]], align 8 -// CHECK18-NEXT: [[TMP93:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP94:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP95:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP96:%.*]] = load i32, i32* [[N]], align 4 -// CHECK18-NEXT: store i32 [[TMP96]], i32* [[DOTCAPTURE_EXPR_27]], align 4 -// CHECK18-NEXT: [[TMP97:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_27]], align 4 -// CHECK18-NEXT: [[SUB29:%.*]] = sub nsw i32 [[TMP97]], 0 -// CHECK18-NEXT: [[DIV30:%.*]] = sdiv i32 [[SUB29]], 1 -// CHECK18-NEXT: [[SUB31:%.*]] = sub nsw i32 [[DIV30]], 1 -// CHECK18-NEXT: store i32 [[SUB31]], i32* [[DOTCAPTURE_EXPR_28]], align 4 -// CHECK18-NEXT: [[TMP98:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_28]], align 4 -// CHECK18-NEXT: [[ADD32:%.*]] = add nsw i32 [[TMP98]], 1 -// CHECK18-NEXT: [[TMP99:%.*]] = zext i32 [[ADD32]] to i64 -// CHECK18-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP99]]) -// CHECK18-NEXT: [[TMP100:%.*]] = call i32 
@__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147.region_id, i32 4, i8** [[TMP93]], i8** [[TMP94]], i64* [[TMP95]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.7, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK18-NEXT: [[TMP101:%.*]] = icmp ne i32 [[TMP100]], 0 -// CHECK18-NEXT: br i1 [[TMP101]], label [[OMP_OFFLOAD_FAILED33:%.*]], label [[OMP_OFFLOAD_CONT34:%.*]] -// CHECK18: omp_offload.failed33: -// CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147(i64 [[TMP65]], i64 [[TMP1]], i32* [[VLA]], i64 [[TMP67]]) #[[ATTR3]] -// CHECK18-NEXT: br label [[OMP_OFFLOAD_CONT34]] -// CHECK18: omp_offload.cont34: -// CHECK18-NEXT: [[TMP102:%.*]] = load i32, i32* [[N]], align 4 -// CHECK18-NEXT: [[CONV36:%.*]] = bitcast i64* [[N_CASTED35]] to i32* -// CHECK18-NEXT: store i32 [[TMP102]], i32* [[CONV36]], align 4 -// CHECK18-NEXT: [[TMP103:%.*]] = load i64, i64* [[N_CASTED35]], align 8 -// CHECK18-NEXT: [[TMP104:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK18-NEXT: [[TMP105:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS37]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP106:%.*]] = bitcast i8** [[TMP105]] to i64* -// CHECK18-NEXT: store i64 [[TMP103]], i64* [[TMP106]], align 8 -// CHECK18-NEXT: [[TMP107:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS38]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP108:%.*]] = bitcast i8** [[TMP107]] to i64* -// CHECK18-NEXT: store i64 [[TMP103]], i64* [[TMP108]], align 8 -// CHECK18-NEXT: [[TMP109:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES40]], i32 0, i32 0 -// CHECK18-NEXT: store i64 4, i64* [[TMP109]], align 8 -// CHECK18-NEXT: [[TMP110:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS39]], i64 0, i64 0 -// CHECK18-NEXT: store i8* null, i8** [[TMP110]], align 8 -// CHECK18-NEXT: [[TMP111:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS37]], i32 0, i32 1 -// CHECK18-NEXT: [[TMP112:%.*]] = bitcast i8** [[TMP111]] to i64* -// CHECK18-NEXT: store i64 [[TMP1]], i64* [[TMP112]], align 8 -// CHECK18-NEXT: [[TMP113:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS38]], i32 0, i32 1 -// CHECK18-NEXT: [[TMP114:%.*]] = bitcast i8** [[TMP113]] to i64* -// CHECK18-NEXT: store i64 [[TMP1]], i64* [[TMP114]], align 8 -// CHECK18-NEXT: [[TMP115:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES40]], i32 0, i32 1 -// CHECK18-NEXT: store i64 8, i64* [[TMP115]], align 8 -// CHECK18-NEXT: [[TMP116:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS39]], i64 0, i64 1 -// CHECK18-NEXT: store i8* null, i8** [[TMP116]], align 8 -// CHECK18-NEXT: [[TMP117:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS37]], i32 0, i32 2 -// CHECK18-NEXT: [[TMP118:%.*]] = bitcast i8** [[TMP117]] to i32** -// CHECK18-NEXT: store i32* [[VLA]], i32** [[TMP118]], align 8 -// CHECK18-NEXT: [[TMP119:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS38]], i32 0, i32 2 -// CHECK18-NEXT: [[TMP120:%.*]] = bitcast i8** [[TMP119]] to i32** -// CHECK18-NEXT: store i32* [[VLA]], i32** [[TMP120]], align 8 -// CHECK18-NEXT: [[TMP121:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES40]], i32 0, i32 2 -// CHECK18-NEXT: store i64 [[TMP104]], i64* [[TMP121]], align 8 -// CHECK18-NEXT: [[TMP122:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* 
[[DOTOFFLOAD_MAPPERS39]], i64 0, i64 2 -// CHECK18-NEXT: store i8* null, i8** [[TMP122]], align 8 -// CHECK18-NEXT: [[TMP123:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS37]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP124:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS38]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP125:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES40]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP126:%.*]] = load i32, i32* [[N]], align 4 -// CHECK18-NEXT: store i32 [[TMP126]], i32* [[DOTCAPTURE_EXPR_42]], align 4 -// CHECK18-NEXT: [[TMP127:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_42]], align 4 -// CHECK18-NEXT: [[SUB44:%.*]] = sub nsw i32 [[TMP127]], 0 -// CHECK18-NEXT: [[DIV45:%.*]] = sdiv i32 [[SUB44]], 1 -// CHECK18-NEXT: [[SUB46:%.*]] = sub nsw i32 [[DIV45]], 1 -// CHECK18-NEXT: store i32 [[SUB46]], i32* [[DOTCAPTURE_EXPR_43]], align 4 -// CHECK18-NEXT: [[TMP128:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_43]], align 4 -// CHECK18-NEXT: [[ADD47:%.*]] = add nsw i32 [[TMP128]], 1 -// CHECK18-NEXT: [[TMP129:%.*]] = zext i32 [[ADD47]] to i64 -// CHECK18-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP129]]) -// CHECK18-NEXT: [[TMP130:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151.region_id, i32 3, i8** [[TMP123]], i8** [[TMP124]], i64* [[TMP125]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK18-NEXT: [[TMP131:%.*]] = icmp ne i32 [[TMP130]], 0 -// CHECK18-NEXT: br i1 [[TMP131]], label [[OMP_OFFLOAD_FAILED48:%.*]], label [[OMP_OFFLOAD_CONT49:%.*]] -// CHECK18: omp_offload.failed48: -// CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151(i64 [[TMP103]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] -// CHECK18-NEXT: br label [[OMP_OFFLOAD_CONT49]] -// CHECK18: omp_offload.cont49: -// CHECK18-NEXT: [[TMP132:%.*]] = load i32, i32* [[M]], align 4 -// CHECK18-NEXT: store i32 [[TMP132]], i32* [[DOTCAPTURE_EXPR_50]], align 4 -// CHECK18-NEXT: [[TMP133:%.*]] = load i32, i32* [[N]], align 4 -// CHECK18-NEXT: [[CONV52:%.*]] = bitcast i64* [[N_CASTED51]] to i32* -// CHECK18-NEXT: store i32 [[TMP133]], i32* [[CONV52]], align 4 -// CHECK18-NEXT: [[TMP134:%.*]] = load i64, i64* [[N_CASTED51]], align 8 -// CHECK18-NEXT: [[TMP135:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_50]], align 4 -// CHECK18-NEXT: [[CONV54:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED53]] to i32* -// CHECK18-NEXT: store i32 [[TMP135]], i32* [[CONV54]], align 4 -// CHECK18-NEXT: [[TMP136:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED53]], align 8 -// CHECK18-NEXT: [[TMP137:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK18-NEXT: [[TMP138:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS55]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP83:%.*]] = load i32, i32* [[N]], align 4 +// CHECK18-NEXT: store i32 [[TMP83]], i32* [[DOTCAPTURE_EXPR_25]], align 4 +// CHECK18-NEXT: [[TMP84:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_25]], align 4 +// CHECK18-NEXT: [[SUB27:%.*]] = sub nsw i32 [[TMP84]], 0 +// CHECK18-NEXT: [[DIV28:%.*]] = sdiv i32 [[SUB27]], 1 +// CHECK18-NEXT: [[SUB29:%.*]] = sub nsw 
i32 [[DIV28]], 1 +// CHECK18-NEXT: store i32 [[SUB29]], i32* [[DOTCAPTURE_EXPR_26]], align 4 +// CHECK18-NEXT: [[TMP85:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_26]], align 4 +// CHECK18-NEXT: [[ADD30:%.*]] = add nsw i32 [[TMP85]], 1 +// CHECK18-NEXT: [[TMP86:%.*]] = zext i32 [[ADD30]] to i64 +// CHECK18-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP86]]) +// CHECK18-NEXT: [[TMP87:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147.region_id, i32 4, i8** [[TMP81]], i8** [[TMP82]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK18-NEXT: [[TMP88:%.*]] = icmp ne i32 [[TMP87]], 0 +// CHECK18-NEXT: br i1 [[TMP88]], label [[OMP_OFFLOAD_FAILED31:%.*]], label [[OMP_OFFLOAD_CONT32:%.*]] +// CHECK18: omp_offload.failed31: +// CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147(i64 [[TMP57]], i64 [[TMP1]], i32* [[VLA]], i64 [[TMP59]]) #[[ATTR3]] +// CHECK18-NEXT: br label [[OMP_OFFLOAD_CONT32]] +// CHECK18: omp_offload.cont32: +// CHECK18-NEXT: [[TMP89:%.*]] = load i32, i32* [[N]], align 4 +// CHECK18-NEXT: [[CONV34:%.*]] = bitcast i64* [[N_CASTED33]] to i32* +// CHECK18-NEXT: store i32 [[TMP89]], i32* [[CONV34]], align 4 +// CHECK18-NEXT: [[TMP90:%.*]] = load i64, i64* [[N_CASTED33]], align 8 +// CHECK18-NEXT: [[TMP91:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK18-NEXT: [[TMP92:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS35]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP93:%.*]] = bitcast i8** [[TMP92]] to i64* +// CHECK18-NEXT: store i64 [[TMP90]], i64* [[TMP93]], align 8 +// CHECK18-NEXT: [[TMP94:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS36]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP95:%.*]] = bitcast i8** [[TMP94]] to i64* +// CHECK18-NEXT: store i64 [[TMP90]], i64* [[TMP95]], align 8 +// CHECK18-NEXT: [[TMP96:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS37]], i64 0, i64 0 +// CHECK18-NEXT: store i8* null, i8** [[TMP96]], align 8 +// CHECK18-NEXT: [[TMP97:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS35]], i32 0, i32 1 +// CHECK18-NEXT: [[TMP98:%.*]] = bitcast i8** [[TMP97]] to i64* +// CHECK18-NEXT: store i64 [[TMP1]], i64* [[TMP98]], align 8 +// CHECK18-NEXT: [[TMP99:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS36]], i32 0, i32 1 +// CHECK18-NEXT: [[TMP100:%.*]] = bitcast i8** [[TMP99]] to i64* +// CHECK18-NEXT: store i64 [[TMP1]], i64* [[TMP100]], align 8 +// CHECK18-NEXT: [[TMP101:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS37]], i64 0, i64 1 +// CHECK18-NEXT: store i8* null, i8** [[TMP101]], align 8 +// CHECK18-NEXT: [[TMP102:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS35]], i32 0, i32 2 +// CHECK18-NEXT: [[TMP103:%.*]] = bitcast i8** [[TMP102]] to i32** +// CHECK18-NEXT: store i32* [[VLA]], i32** [[TMP103]], align 8 +// CHECK18-NEXT: [[TMP104:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS36]], i32 0, i32 2 +// CHECK18-NEXT: [[TMP105:%.*]] = bitcast i8** [[TMP104]] to i32** +// CHECK18-NEXT: store i32* [[VLA]], i32** [[TMP105]], align 8 +// CHECK18-NEXT: store i64 [[TMP91]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.12, i32 0, i32 2), 
align 8 +// CHECK18-NEXT: [[TMP106:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS37]], i64 0, i64 2 +// CHECK18-NEXT: store i8* null, i8** [[TMP106]], align 8 +// CHECK18-NEXT: [[TMP107:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS35]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP108:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS36]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP109:%.*]] = load i32, i32* [[N]], align 4 +// CHECK18-NEXT: store i32 [[TMP109]], i32* [[DOTCAPTURE_EXPR_39]], align 4 +// CHECK18-NEXT: [[TMP110:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_39]], align 4 +// CHECK18-NEXT: [[SUB41:%.*]] = sub nsw i32 [[TMP110]], 0 +// CHECK18-NEXT: [[DIV42:%.*]] = sdiv i32 [[SUB41]], 1 +// CHECK18-NEXT: [[SUB43:%.*]] = sub nsw i32 [[DIV42]], 1 +// CHECK18-NEXT: store i32 [[SUB43]], i32* [[DOTCAPTURE_EXPR_40]], align 4 +// CHECK18-NEXT: [[TMP111:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_40]], align 4 +// CHECK18-NEXT: [[ADD44:%.*]] = add nsw i32 [[TMP111]], 1 +// CHECK18-NEXT: [[TMP112:%.*]] = zext i32 [[ADD44]] to i64 +// CHECK18-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP112]]) +// CHECK18-NEXT: [[TMP113:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151.region_id, i32 3, i8** [[TMP107]], i8** [[TMP108]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK18-NEXT: [[TMP114:%.*]] = icmp ne i32 [[TMP113]], 0 +// CHECK18-NEXT: br i1 [[TMP114]], label [[OMP_OFFLOAD_FAILED45:%.*]], label [[OMP_OFFLOAD_CONT46:%.*]] +// CHECK18: omp_offload.failed45: +// CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151(i64 [[TMP90]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] +// CHECK18-NEXT: br label [[OMP_OFFLOAD_CONT46]] +// CHECK18: omp_offload.cont46: +// CHECK18-NEXT: [[TMP115:%.*]] = load i32, i32* [[M]], align 4 +// CHECK18-NEXT: store i32 [[TMP115]], i32* [[DOTCAPTURE_EXPR_47]], align 4 +// CHECK18-NEXT: [[TMP116:%.*]] = load i32, i32* [[N]], align 4 +// CHECK18-NEXT: [[CONV49:%.*]] = bitcast i64* [[N_CASTED48]] to i32* +// CHECK18-NEXT: store i32 [[TMP116]], i32* [[CONV49]], align 4 +// CHECK18-NEXT: [[TMP117:%.*]] = load i64, i64* [[N_CASTED48]], align 8 +// CHECK18-NEXT: [[TMP118:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_47]], align 4 +// CHECK18-NEXT: [[CONV51:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED50]] to i32* +// CHECK18-NEXT: store i32 [[TMP118]], i32* [[CONV51]], align 4 +// CHECK18-NEXT: [[TMP119:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED50]], align 8 +// CHECK18-NEXT: [[TMP120:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK18-NEXT: [[TMP121:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS52]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP122:%.*]] = bitcast i8** [[TMP121]] to i64* +// CHECK18-NEXT: store i64 [[TMP117]], i64* [[TMP122]], align 8 +// CHECK18-NEXT: [[TMP123:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS53]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP124:%.*]] = bitcast i8** [[TMP123]] to i64* +// CHECK18-NEXT: store i64 [[TMP117]], i64* [[TMP124]], align 8 +// CHECK18-NEXT: [[TMP125:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS54]], i64 0, i64 0 +// CHECK18-NEXT: store i8* null, i8** [[TMP125]], align 8 +// 
CHECK18-NEXT: [[TMP126:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS52]], i32 0, i32 1 +// CHECK18-NEXT: [[TMP127:%.*]] = bitcast i8** [[TMP126]] to i64* +// CHECK18-NEXT: store i64 [[TMP1]], i64* [[TMP127]], align 8 +// CHECK18-NEXT: [[TMP128:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS53]], i32 0, i32 1 +// CHECK18-NEXT: [[TMP129:%.*]] = bitcast i8** [[TMP128]] to i64* +// CHECK18-NEXT: store i64 [[TMP1]], i64* [[TMP129]], align 8 +// CHECK18-NEXT: [[TMP130:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS54]], i64 0, i64 1 +// CHECK18-NEXT: store i8* null, i8** [[TMP130]], align 8 +// CHECK18-NEXT: [[TMP131:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS52]], i32 0, i32 2 +// CHECK18-NEXT: [[TMP132:%.*]] = bitcast i8** [[TMP131]] to i32** +// CHECK18-NEXT: store i32* [[VLA]], i32** [[TMP132]], align 8 +// CHECK18-NEXT: [[TMP133:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS53]], i32 0, i32 2 +// CHECK18-NEXT: [[TMP134:%.*]] = bitcast i8** [[TMP133]] to i32** +// CHECK18-NEXT: store i32* [[VLA]], i32** [[TMP134]], align 8 +// CHECK18-NEXT: store i64 [[TMP120]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.16, i32 0, i32 2), align 8 +// CHECK18-NEXT: [[TMP135:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS54]], i64 0, i64 2 +// CHECK18-NEXT: store i8* null, i8** [[TMP135]], align 8 +// CHECK18-NEXT: [[TMP136:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS52]], i32 0, i32 3 +// CHECK18-NEXT: [[TMP137:%.*]] = bitcast i8** [[TMP136]] to i64* +// CHECK18-NEXT: store i64 [[TMP119]], i64* [[TMP137]], align 8 +// CHECK18-NEXT: [[TMP138:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS53]], i32 0, i32 3 // CHECK18-NEXT: [[TMP139:%.*]] = bitcast i8** [[TMP138]] to i64* -// CHECK18-NEXT: store i64 [[TMP134]], i64* [[TMP139]], align 8 -// CHECK18-NEXT: [[TMP140:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS56]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP141:%.*]] = bitcast i8** [[TMP140]] to i64* -// CHECK18-NEXT: store i64 [[TMP134]], i64* [[TMP141]], align 8 -// CHECK18-NEXT: [[TMP142:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES58]], i32 0, i32 0 -// CHECK18-NEXT: store i64 4, i64* [[TMP142]], align 8 -// CHECK18-NEXT: [[TMP143:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS57]], i64 0, i64 0 -// CHECK18-NEXT: store i8* null, i8** [[TMP143]], align 8 -// CHECK18-NEXT: [[TMP144:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS55]], i32 0, i32 1 -// CHECK18-NEXT: [[TMP145:%.*]] = bitcast i8** [[TMP144]] to i64* -// CHECK18-NEXT: store i64 [[TMP1]], i64* [[TMP145]], align 8 -// CHECK18-NEXT: [[TMP146:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS56]], i32 0, i32 1 -// CHECK18-NEXT: [[TMP147:%.*]] = bitcast i8** [[TMP146]] to i64* -// CHECK18-NEXT: store i64 [[TMP1]], i64* [[TMP147]], align 8 -// CHECK18-NEXT: [[TMP148:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES58]], i32 0, i32 1 -// CHECK18-NEXT: store i64 8, i64* [[TMP148]], align 8 -// CHECK18-NEXT: [[TMP149:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS57]], i64 0, i64 1 -// CHECK18-NEXT: store i8* null, i8** [[TMP149]], align 8 -// CHECK18-NEXT: [[TMP150:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS55]], i32 0, i32 2 -// 
CHECK18-NEXT: [[TMP151:%.*]] = bitcast i8** [[TMP150]] to i32** -// CHECK18-NEXT: store i32* [[VLA]], i32** [[TMP151]], align 8 -// CHECK18-NEXT: [[TMP152:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS56]], i32 0, i32 2 -// CHECK18-NEXT: [[TMP153:%.*]] = bitcast i8** [[TMP152]] to i32** -// CHECK18-NEXT: store i32* [[VLA]], i32** [[TMP153]], align 8 -// CHECK18-NEXT: [[TMP154:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES58]], i32 0, i32 2 -// CHECK18-NEXT: store i64 [[TMP137]], i64* [[TMP154]], align 8 -// CHECK18-NEXT: [[TMP155:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS57]], i64 0, i64 2 -// CHECK18-NEXT: store i8* null, i8** [[TMP155]], align 8 -// CHECK18-NEXT: [[TMP156:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS55]], i32 0, i32 3 -// CHECK18-NEXT: [[TMP157:%.*]] = bitcast i8** [[TMP156]] to i64* -// CHECK18-NEXT: store i64 [[TMP136]], i64* [[TMP157]], align 8 -// CHECK18-NEXT: [[TMP158:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS56]], i32 0, i32 3 -// CHECK18-NEXT: [[TMP159:%.*]] = bitcast i8** [[TMP158]] to i64* -// CHECK18-NEXT: store i64 [[TMP136]], i64* [[TMP159]], align 8 -// CHECK18-NEXT: [[TMP160:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES58]], i32 0, i32 3 -// CHECK18-NEXT: store i64 4, i64* [[TMP160]], align 8 -// CHECK18-NEXT: [[TMP161:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS57]], i64 0, i64 3 -// CHECK18-NEXT: store i8* null, i8** [[TMP161]], align 8 -// CHECK18-NEXT: [[TMP162:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS55]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP163:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS56]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP164:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES58]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP165:%.*]] = load i32, i32* [[N]], align 4 -// CHECK18-NEXT: store i32 [[TMP165]], i32* [[DOTCAPTURE_EXPR_60]], align 4 -// CHECK18-NEXT: [[TMP166:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_60]], align 4 -// CHECK18-NEXT: [[SUB62:%.*]] = sub nsw i32 [[TMP166]], 0 -// CHECK18-NEXT: [[DIV63:%.*]] = sdiv i32 [[SUB62]], 1 -// CHECK18-NEXT: [[SUB64:%.*]] = sub nsw i32 [[DIV63]], 1 -// CHECK18-NEXT: store i32 [[SUB64]], i32* [[DOTCAPTURE_EXPR_61]], align 4 -// CHECK18-NEXT: [[TMP167:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_61]], align 4 -// CHECK18-NEXT: [[ADD65:%.*]] = add nsw i32 [[TMP167]], 1 -// CHECK18-NEXT: [[TMP168:%.*]] = zext i32 [[ADD65]] to i64 -// CHECK18-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP168]]) -// CHECK18-NEXT: [[TMP169:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155.region_id, i32 4, i8** [[TMP162]], i8** [[TMP163]], i64* [[TMP164]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK18-NEXT: [[TMP170:%.*]] = icmp ne i32 [[TMP169]], 0 -// CHECK18-NEXT: br i1 [[TMP170]], label [[OMP_OFFLOAD_FAILED66:%.*]], label [[OMP_OFFLOAD_CONT67:%.*]] -// CHECK18: omp_offload.failed66: -// CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155(i64 [[TMP134]], i64 [[TMP1]], i32* [[VLA]], i64 [[TMP136]]) #[[ATTR3]] -// CHECK18-NEXT: br label [[OMP_OFFLOAD_CONT67]] -// CHECK18: omp_offload.cont67: -// CHECK18-NEXT: 
[[TMP171:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 -// CHECK18-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP171]]) +// CHECK18-NEXT: store i64 [[TMP119]], i64* [[TMP139]], align 8 +// CHECK18-NEXT: [[TMP140:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS54]], i64 0, i64 3 +// CHECK18-NEXT: store i8* null, i8** [[TMP140]], align 8 +// CHECK18-NEXT: [[TMP141:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS52]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP142:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS53]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP143:%.*]] = load i32, i32* [[N]], align 4 +// CHECK18-NEXT: store i32 [[TMP143]], i32* [[DOTCAPTURE_EXPR_56]], align 4 +// CHECK18-NEXT: [[TMP144:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_56]], align 4 +// CHECK18-NEXT: [[SUB58:%.*]] = sub nsw i32 [[TMP144]], 0 +// CHECK18-NEXT: [[DIV59:%.*]] = sdiv i32 [[SUB58]], 1 +// CHECK18-NEXT: [[SUB60:%.*]] = sub nsw i32 [[DIV59]], 1 +// CHECK18-NEXT: store i32 [[SUB60]], i32* [[DOTCAPTURE_EXPR_57]], align 4 +// CHECK18-NEXT: [[TMP145:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_57]], align 4 +// CHECK18-NEXT: [[ADD61:%.*]] = add nsw i32 [[TMP145]], 1 +// CHECK18-NEXT: [[TMP146:%.*]] = zext i32 [[ADD61]] to i64 +// CHECK18-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP146]]) +// CHECK18-NEXT: [[TMP147:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155.region_id, i32 4, i8** [[TMP141]], i8** [[TMP142]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.16, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK18-NEXT: [[TMP148:%.*]] = icmp ne i32 [[TMP147]], 0 +// CHECK18-NEXT: br i1 [[TMP148]], label [[OMP_OFFLOAD_FAILED62:%.*]], label [[OMP_OFFLOAD_CONT63:%.*]] +// CHECK18: omp_offload.failed62: +// CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155(i64 [[TMP117]], i64 [[TMP1]], i32* [[VLA]], i64 [[TMP119]]) #[[ATTR3]] +// CHECK18-NEXT: br label [[OMP_OFFLOAD_CONT63]] +// CHECK18: omp_offload.cont63: +// CHECK18-NEXT: [[TMP149:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 +// CHECK18-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP149]]) // CHECK18-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK18-NEXT: [[TMP172:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK18-NEXT: call void @llvm.stackrestore(i8* [[TMP172]]) -// CHECK18-NEXT: [[TMP173:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK18-NEXT: ret i32 [[TMP173]] +// CHECK18-NEXT: [[TMP150:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK18-NEXT: call void @llvm.stackrestore(i8* [[TMP150]]) +// CHECK18-NEXT: [[TMP151:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK18-NEXT: ret i32 [[TMP151]] // // // CHECK18-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139 @@ -20644,11 +20410,11 @@ // CHECK18-NEXT: [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK18-NEXT: store i32 [[TMP4]], i32* [[CONV3]], align 4 // CHECK18-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP5]]) +// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP5]]) // CHECK18-NEXT: ret void // // -// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..5 +// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..6 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK18-NEXT: entry: // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -20735,7 +20501,7 @@ // CHECK18-NEXT: [[CONV9:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK18-NEXT: store i32 [[TMP22]], i32* [[CONV9]], align 4 // CHECK18-NEXT: [[TMP23:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*, i64)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i64 [[TMP17]], i64 [[TMP19]], i64 [[TMP21]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP23]]) +// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*, i64)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i64 [[TMP17]], i64 [[TMP19]], i64 [[TMP21]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP23]]) // CHECK18-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK18: omp.inner.for.inc: // CHECK18-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -20777,7 +20543,7 @@ // CHECK18-NEXT: ret void // // -// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..6 +// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..7 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK18-NEXT: entry: // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -20902,11 +20668,11 @@ // CHECK18-NEXT: [[CONV1:%.*]] = bitcast i64* [[N_CASTED]] to i32* // CHECK18-NEXT: store i32 [[TMP2]], i32* [[CONV1]], align 4 // CHECK18-NEXT: [[TMP3:%.*]] = load i64, i64* [[N_CASTED]], align 8 -// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*)* @.omp_outlined..8 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]]) +// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]]) // CHECK18-NEXT: ret void // // -// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..8 +// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..10 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] { // CHECK18-NEXT: entry: // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -20983,7 +20749,7 @@ // CHECK18-NEXT: [[CONV6:%.*]] = bitcast i64* [[N_CASTED]] to i32* // CHECK18-NEXT: store i32 [[TMP19]], i32* [[CONV6]], align 4 // CHECK18-NEXT: [[TMP20:%.*]] = load i64, i64* [[N_CASTED]], align 8 -// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), i64 [[TMP16]], i64 [[TMP18]], i64 [[TMP20]], i64 [[TMP0]], i32* [[TMP1]]) +// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP16]], i64 [[TMP18]], i64 [[TMP20]], i64 [[TMP0]], i32* [[TMP1]]) // CHECK18-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK18: omp.inner.for.inc: // CHECK18-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -21002,7 +20768,7 @@ // CHECK18-NEXT: ret void // // -// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..9 +// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..11 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] { // CHECK18-NEXT: entry: // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -21128,11 +20894,11 @@ // CHECK18-NEXT: [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK18-NEXT: store i32 [[TMP4]], i32* [[CONV3]], align 4 // CHECK18-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP5]]) +// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP5]]) // CHECK18-NEXT: ret void // // -// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..14 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK18-NEXT: entry: // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -21217,7 +20983,7 @@ // CHECK18-NEXT: [[CONV9:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK18-NEXT: store i32 [[TMP21]], i32* [[CONV9]], align 4 // CHECK18-NEXT: [[TMP22:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*, i64)* @.omp_outlined..12 to void (i32*, i32*, ...)*), i64 [[TMP16]], i64 [[TMP18]], i64 [[TMP20]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP22]]) +// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*, i64)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i64 [[TMP16]], i64 [[TMP18]], i64 [[TMP20]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP22]]) // CHECK18-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK18: omp.inner.for.inc: // CHECK18-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -21236,7 +21002,7 @@ // CHECK18-NEXT: ret void // // -// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..12 +// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..15 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK18-NEXT: entry: // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -21384,7 +21150,7 @@ // CHECK18-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK18-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK18-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK18-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l112.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK18-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l112.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.20, i32 0, i32 0), i64* 
getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.21, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK18-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK18-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK18: omp_offload.failed: @@ -21402,7 +21168,7 @@ // CHECK18-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0 // CHECK18-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0 // CHECK18-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK18-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.19, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.20, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK18-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.24, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.25, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK18-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 // CHECK18-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]] // CHECK18: omp_offload.failed5: @@ -21434,7 +21200,7 @@ // CHECK18-NEXT: [[TMP31:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0 // CHECK18-NEXT: [[TMP32:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0 // CHECK18-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK18-NEXT: [[TMP33:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l120.region_id, i32 2, i8** [[TMP31]], i8** [[TMP32]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.23, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.24, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK18-NEXT: [[TMP33:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l120.region_id, i32 2, i8** [[TMP31]], i8** [[TMP32]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.28, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.29, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK18-NEXT: [[TMP34:%.*]] = icmp ne i32 [[TMP33]], 0 // CHECK18-NEXT: br i1 [[TMP34]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]] // CHECK18: omp_offload.failed11: @@ -21452,7 +21218,7 @@ // CHECK18-NEXT: [[TMP40:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 0 // CHECK18-NEXT: [[TMP41:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 0 // CHECK18-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// 
CHECK18-NEXT: [[TMP42:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l124.region_id, i32 1, i8** [[TMP40]], i8** [[TMP41]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.27, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.28, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK18-NEXT: [[TMP42:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l124.region_id, i32 1, i8** [[TMP40]], i8** [[TMP41]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.32, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.33, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK18-NEXT: [[TMP43:%.*]] = icmp ne i32 [[TMP42]], 0 // CHECK18-NEXT: br i1 [[TMP43]], label [[OMP_OFFLOAD_FAILED17:%.*]], label [[OMP_OFFLOAD_CONT18:%.*]] // CHECK18: omp_offload.failed17: @@ -21484,7 +21250,7 @@ // CHECK18-NEXT: [[TMP57:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 0 // CHECK18-NEXT: [[TMP58:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 0 // CHECK18-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK18-NEXT: [[TMP59:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l128.region_id, i32 2, i8** [[TMP57]], i8** [[TMP58]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.31, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.32, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK18-NEXT: [[TMP59:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l128.region_id, i32 2, i8** [[TMP57]], i8** [[TMP58]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.36, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.37, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK18-NEXT: [[TMP60:%.*]] = icmp ne i32 [[TMP59]], 0 // CHECK18-NEXT: br i1 [[TMP60]], label [[OMP_OFFLOAD_FAILED26:%.*]], label [[OMP_OFFLOAD_CONT27:%.*]] // CHECK18: omp_offload.failed26: @@ -21500,11 +21266,11 @@ // CHECK18-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK18-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 // CHECK18-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 -// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK18-NEXT: ret void // // -// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..14 +// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..18 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK18-NEXT: entry: // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -21552,7 +21318,7 @@ // CHECK18-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 // CHECK18-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 // CHECK18-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]) +// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..19 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]) // CHECK18-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK18: omp.inner.for.inc: // CHECK18-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -21567,7 +21333,7 @@ // CHECK18-NEXT: ret void // // -// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..15 +// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..19 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK18-NEXT: entry: // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -21650,11 +21416,11 @@ // CHECK18-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK18-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 // CHECK18-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 -// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..17 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..22 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK18-NEXT: ret void // // -// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..17 +// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..22 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK18-NEXT: entry: // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -21702,7 +21468,7 @@ // CHECK18-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 // CHECK18-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 // CHECK18-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]) +// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..23 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]) // CHECK18-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK18: omp.inner.for.inc: // CHECK18-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -21717,7 +21483,7 @@ // CHECK18-NEXT: ret void // // -// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..18 +// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..23 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK18-NEXT: entry: // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -21808,11 +21574,11 @@ // CHECK18-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK18-NEXT: store i32 [[TMP1]], i32* [[CONV1]], align 4 // CHECK18-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..21 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP2]]) +// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..26 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP2]]) // CHECK18-NEXT: ret void // // -// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..21 +// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..26 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK18-NEXT: entry: // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -21868,7 +21634,7 @@ // CHECK18-NEXT: [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK18-NEXT: store i32 [[TMP12]], i32* [[CONV2]], align 4 // CHECK18-NEXT: [[TMP13:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..22 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]]) +// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..27 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]]) // CHECK18-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK18: omp.inner.for.inc: // CHECK18-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -21883,7 +21649,7 @@ // CHECK18-NEXT: ret void // // -// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..22 +// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..27 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK18-NEXT: entry: // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -21991,11 +21757,11 @@ // CHECK18-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK18-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 // CHECK18-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 -// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..25 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..30 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK18-NEXT: ret void // // -// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..25 +// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..30 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK18-NEXT: entry: // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -22043,7 +21809,7 @@ // CHECK18-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 // CHECK18-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 // CHECK18-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..26 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]) +// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..31 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]) // CHECK18-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK18: omp.inner.for.inc: // CHECK18-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -22058,7 +21824,7 @@ // CHECK18-NEXT: ret void // // -// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..26 +// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..31 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK18-NEXT: entry: // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -22147,11 +21913,11 @@ // CHECK18-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK18-NEXT: store i32 [[TMP1]], i32* [[CONV1]], align 4 // CHECK18-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..29 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP2]]) +// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..34 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP2]]) // CHECK18-NEXT: ret void // // -// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..29 +// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..34 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK18-NEXT: entry: // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -22207,7 +21973,7 @@ // CHECK18-NEXT: [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK18-NEXT: store i32 [[TMP12]], i32* [[CONV2]], align 4 // CHECK18-NEXT: [[TMP13:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..30 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]]) +// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..35 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]]) // CHECK18-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK18: omp.inner.for.inc: // CHECK18-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -22222,7 +21988,7 @@ // CHECK18-NEXT: ret void // // -// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..30 +// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..35 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK18-NEXT: entry: // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -22322,7 +22088,6 @@ // CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK19-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK19-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4 -// CHECK19-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 4 // CHECK19-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK19-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK19-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -22330,38 +22095,34 @@ // CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS4:%.*]] = alloca [3 x i8*], align 4 // CHECK19-NEXT: [[DOTOFFLOAD_PTRS5:%.*]] = alloca [3 x i8*], align 4 // CHECK19-NEXT: [[DOTOFFLOAD_MAPPERS6:%.*]] = alloca [3 x i8*], align 4 -// CHECK19-NEXT: [[DOTOFFLOAD_SIZES7:%.*]] = alloca [3 x i64], align 4 -// CHECK19-NEXT: [[_TMP8:%.*]] = alloca i32, align 4 +// CHECK19-NEXT: [[_TMP7:%.*]] = alloca i32, align 4 +// CHECK19-NEXT: [[DOTCAPTURE_EXPR_8:%.*]] = alloca i32, align 4 // CHECK19-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4 -// CHECK19-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4 -// CHECK19-NEXT: [[DOTCAPTURE_EXPR_17:%.*]] = alloca i32, align 4 -// CHECK19-NEXT: [[N_CASTED18:%.*]] = alloca i32, align 4 +// CHECK19-NEXT: 
[[DOTCAPTURE_EXPR_16:%.*]] = alloca i32, align 4 +// CHECK19-NEXT: [[N_CASTED17:%.*]] = alloca i32, align 4 // CHECK19-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4 -// CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS19:%.*]] = alloca [4 x i8*], align 4 -// CHECK19-NEXT: [[DOTOFFLOAD_PTRS20:%.*]] = alloca [4 x i8*], align 4 -// CHECK19-NEXT: [[DOTOFFLOAD_MAPPERS21:%.*]] = alloca [4 x i8*], align 4 -// CHECK19-NEXT: [[DOTOFFLOAD_SIZES22:%.*]] = alloca [4 x i64], align 4 -// CHECK19-NEXT: [[_TMP23:%.*]] = alloca i32, align 4 -// CHECK19-NEXT: [[DOTCAPTURE_EXPR_24:%.*]] = alloca i32, align 4 -// CHECK19-NEXT: [[DOTCAPTURE_EXPR_25:%.*]] = alloca i32, align 4 -// CHECK19-NEXT: [[N_CASTED32:%.*]] = alloca i32, align 4 -// CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS33:%.*]] = alloca [3 x i8*], align 4 -// CHECK19-NEXT: [[DOTOFFLOAD_PTRS34:%.*]] = alloca [3 x i8*], align 4 -// CHECK19-NEXT: [[DOTOFFLOAD_MAPPERS35:%.*]] = alloca [3 x i8*], align 4 -// CHECK19-NEXT: [[DOTOFFLOAD_SIZES36:%.*]] = alloca [3 x i64], align 4 -// CHECK19-NEXT: [[_TMP37:%.*]] = alloca i32, align 4 -// CHECK19-NEXT: [[DOTCAPTURE_EXPR_38:%.*]] = alloca i32, align 4 -// CHECK19-NEXT: [[DOTCAPTURE_EXPR_39:%.*]] = alloca i32, align 4 -// CHECK19-NEXT: [[DOTCAPTURE_EXPR_46:%.*]] = alloca i32, align 4 -// CHECK19-NEXT: [[N_CASTED47:%.*]] = alloca i32, align 4 -// CHECK19-NEXT: [[DOTCAPTURE_EXPR__CASTED48:%.*]] = alloca i32, align 4 -// CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS49:%.*]] = alloca [4 x i8*], align 4 -// CHECK19-NEXT: [[DOTOFFLOAD_PTRS50:%.*]] = alloca [4 x i8*], align 4 -// CHECK19-NEXT: [[DOTOFFLOAD_MAPPERS51:%.*]] = alloca [4 x i8*], align 4 -// CHECK19-NEXT: [[DOTOFFLOAD_SIZES52:%.*]] = alloca [4 x i64], align 4 -// CHECK19-NEXT: [[_TMP53:%.*]] = alloca i32, align 4 -// CHECK19-NEXT: [[DOTCAPTURE_EXPR_54:%.*]] = alloca i32, align 4 -// CHECK19-NEXT: [[DOTCAPTURE_EXPR_55:%.*]] = alloca i32, align 4 +// CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS18:%.*]] = alloca [4 x i8*], align 4 +// CHECK19-NEXT: [[DOTOFFLOAD_PTRS19:%.*]] = alloca [4 x i8*], align 4 +// CHECK19-NEXT: [[DOTOFFLOAD_MAPPERS20:%.*]] = alloca [4 x i8*], align 4 +// CHECK19-NEXT: [[_TMP21:%.*]] = alloca i32, align 4 +// CHECK19-NEXT: [[DOTCAPTURE_EXPR_22:%.*]] = alloca i32, align 4 +// CHECK19-NEXT: [[DOTCAPTURE_EXPR_23:%.*]] = alloca i32, align 4 +// CHECK19-NEXT: [[N_CASTED30:%.*]] = alloca i32, align 4 +// CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS31:%.*]] = alloca [3 x i8*], align 4 +// CHECK19-NEXT: [[DOTOFFLOAD_PTRS32:%.*]] = alloca [3 x i8*], align 4 +// CHECK19-NEXT: [[DOTOFFLOAD_MAPPERS33:%.*]] = alloca [3 x i8*], align 4 +// CHECK19-NEXT: [[_TMP34:%.*]] = alloca i32, align 4 +// CHECK19-NEXT: [[DOTCAPTURE_EXPR_35:%.*]] = alloca i32, align 4 +// CHECK19-NEXT: [[DOTCAPTURE_EXPR_36:%.*]] = alloca i32, align 4 +// CHECK19-NEXT: [[DOTCAPTURE_EXPR_43:%.*]] = alloca i32, align 4 +// CHECK19-NEXT: [[N_CASTED44:%.*]] = alloca i32, align 4 +// CHECK19-NEXT: [[DOTCAPTURE_EXPR__CASTED45:%.*]] = alloca i32, align 4 +// CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS46:%.*]] = alloca [4 x i8*], align 4 +// CHECK19-NEXT: [[DOTOFFLOAD_PTRS47:%.*]] = alloca [4 x i8*], align 4 +// CHECK19-NEXT: [[DOTOFFLOAD_MAPPERS48:%.*]] = alloca [4 x i8*], align 4 +// CHECK19-NEXT: [[_TMP49:%.*]] = alloca i32, align 4 +// CHECK19-NEXT: [[DOTCAPTURE_EXPR_50:%.*]] = alloca i32, align 4 +// CHECK19-NEXT: [[DOTCAPTURE_EXPR_51:%.*]] = alloca i32, align 4 // CHECK19-NEXT: store i32 0, i32* [[RETVAL]], align 4 // CHECK19-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 // CHECK19-NEXT: store i8** [[ARGV]], i8*** 
[[ARGV_ADDR]], align 4 @@ -22383,312 +22144,278 @@ // CHECK19-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK19-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i32* // CHECK19-NEXT: store i32 [[TMP3]], i32* [[TMP9]], align 4 -// CHECK19-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK19-NEXT: store i64 4, i64* [[TMP10]], align 4 -// CHECK19-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK19-NEXT: store i8* null, i8** [[TMP11]], align 4 -// CHECK19-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK19-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32* -// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP13]], align 4 -// CHECK19-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK19-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32* -// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP15]], align 4 -// CHECK19-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK19-NEXT: store i64 4, i64* [[TMP16]], align 4 -// CHECK19-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK19-NEXT: store i8* null, i8** [[TMP17]], align 4 -// CHECK19-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK19-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK19-NEXT: store i8* null, i8** [[TMP10]], align 4 +// CHECK19-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK19-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i32* +// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP12]], align 4 +// CHECK19-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK19-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i32* +// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP14]], align 4 +// CHECK19-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK19-NEXT: store i8* null, i8** [[TMP15]], align 4 +// CHECK19-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK19-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** +// CHECK19-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 4 +// CHECK19-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK19-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** // CHECK19-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 4 -// CHECK19-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK19-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** -// CHECK19-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 4 -// CHECK19-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK19-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 4 -// CHECK19-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK19-NEXT: store i8* null, i8** [[TMP23]], align 4 -// CHECK19-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x 
i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4 -// CHECK19-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK19-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK19-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 +// CHECK19-NEXT: store i64 [[TMP5]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 2), align 4 +// CHECK19-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK19-NEXT: store i8* null, i8** [[TMP20]], align 4 +// CHECK19-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP23:%.*]] = load i32, i32* [[N]], align 4 +// CHECK19-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK19-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK19-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK19-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK19-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK19-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK19-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1 -// CHECK19-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 -// CHECK19-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP30]]) -// CHECK19-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK19-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 -// CHECK19-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK19-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], 1 +// CHECK19-NEXT: [[TMP26:%.*]] = zext i32 [[ADD]] to i64 +// CHECK19-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP26]]) +// CHECK19-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK19-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK19-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK19: omp_offload.failed: // CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139(i32 [[TMP3]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK19: omp_offload.cont: -// CHECK19-NEXT: [[TMP33:%.*]] = load i32, i32* 
[[N]], align 4 -// CHECK19-NEXT: store i32 [[TMP33]], i32* [[N_CASTED3]], align 4 -// CHECK19-NEXT: [[TMP34:%.*]] = load i32, i32* [[N_CASTED3]], align 4 -// CHECK19-NEXT: [[TMP35:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK19-NEXT: [[TMP36:%.*]] = sext i32 [[TMP35]] to i64 -// CHECK19-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i32* -// CHECK19-NEXT: store i32 [[TMP34]], i32* [[TMP38]], align 4 -// CHECK19-NEXT: [[TMP39:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i32* -// CHECK19-NEXT: store i32 [[TMP34]], i32* [[TMP40]], align 4 -// CHECK19-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 0 -// CHECK19-NEXT: store i64 4, i64* [[TMP41]], align 4 -// CHECK19-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP29:%.*]] = load i32, i32* [[N]], align 4 +// CHECK19-NEXT: store i32 [[TMP29]], i32* [[N_CASTED3]], align 4 +// CHECK19-NEXT: [[TMP30:%.*]] = load i32, i32* [[N_CASTED3]], align 4 +// CHECK19-NEXT: [[TMP31:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK19-NEXT: [[TMP32:%.*]] = sext i32 [[TMP31]] to i64 +// CHECK19-NEXT: [[TMP33:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i32* +// CHECK19-NEXT: store i32 [[TMP30]], i32* [[TMP34]], align 4 +// CHECK19-NEXT: [[TMP35:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP36:%.*]] = bitcast i8** [[TMP35]] to i32* +// CHECK19-NEXT: store i32 [[TMP30]], i32* [[TMP36]], align 4 +// CHECK19-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0 +// CHECK19-NEXT: store i8* null, i8** [[TMP37]], align 4 +// CHECK19-NEXT: [[TMP38:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1 +// CHECK19-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i32* +// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP39]], align 4 +// CHECK19-NEXT: [[TMP40:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 1 +// CHECK19-NEXT: [[TMP41:%.*]] = bitcast i8** [[TMP40]] to i32* +// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP41]], align 4 +// CHECK19-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1 // CHECK19-NEXT: store i8* null, i8** [[TMP42]], align 4 -// CHECK19-NEXT: [[TMP43:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1 -// CHECK19-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32* -// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP44]], align 4 -// CHECK19-NEXT: [[TMP45:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 1 -// CHECK19-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32* -// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP46]], align 4 -// CHECK19-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 1 -// CHECK19-NEXT: store i64 4, i64* [[TMP47]], align 4 -// CHECK19-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1 -// CHECK19-NEXT: store i8* null, i8** [[TMP48]], align 4 -// CHECK19-NEXT: [[TMP49:%.*]] 
= getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2 -// CHECK19-NEXT: [[TMP50:%.*]] = bitcast i8** [[TMP49]] to i32** -// CHECK19-NEXT: store i32* [[VLA]], i32** [[TMP50]], align 4 -// CHECK19-NEXT: [[TMP51:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 2 -// CHECK19-NEXT: [[TMP52:%.*]] = bitcast i8** [[TMP51]] to i32** -// CHECK19-NEXT: store i32* [[VLA]], i32** [[TMP52]], align 4 -// CHECK19-NEXT: [[TMP53:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 2 -// CHECK19-NEXT: store i64 [[TMP36]], i64* [[TMP53]], align 4 -// CHECK19-NEXT: [[TMP54:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 2 -// CHECK19-NEXT: store i8* null, i8** [[TMP54]], align 4 -// CHECK19-NEXT: [[TMP55:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP56:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP57:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP58:%.*]] = load i32, i32* [[N]], align 4 -// CHECK19-NEXT: store i32 [[TMP58]], i32* [[DOTCAPTURE_EXPR_9]], align 4 -// CHECK19-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 -// CHECK19-NEXT: [[SUB11:%.*]] = sub nsw i32 [[TMP59]], 0 -// CHECK19-NEXT: [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1 -// CHECK19-NEXT: [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1 -// CHECK19-NEXT: store i32 [[SUB13]], i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK19-NEXT: [[TMP60:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK19-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP60]], 1 -// CHECK19-NEXT: [[TMP61:%.*]] = zext i32 [[ADD14]] to i64 -// CHECK19-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP61]]) -// CHECK19-NEXT: [[TMP62:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143.region_id, i32 3, i8** [[TMP55]], i8** [[TMP56]], i64* [[TMP57]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK19-NEXT: [[TMP63:%.*]] = icmp ne i32 [[TMP62]], 0 -// CHECK19-NEXT: br i1 [[TMP63]], label [[OMP_OFFLOAD_FAILED15:%.*]], label [[OMP_OFFLOAD_CONT16:%.*]] -// CHECK19: omp_offload.failed15: -// CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143(i32 [[TMP34]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] -// CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT16]] -// CHECK19: omp_offload.cont16: -// CHECK19-NEXT: [[TMP64:%.*]] = load i32, i32* [[M]], align 4 -// CHECK19-NEXT: store i32 [[TMP64]], i32* [[DOTCAPTURE_EXPR_17]], align 4 -// CHECK19-NEXT: [[TMP65:%.*]] = load i32, i32* [[N]], align 4 -// CHECK19-NEXT: store i32 [[TMP65]], i32* [[N_CASTED18]], align 4 -// CHECK19-NEXT: [[TMP66:%.*]] = load i32, i32* [[N_CASTED18]], align 4 -// CHECK19-NEXT: [[TMP67:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_17]], align 4 -// CHECK19-NEXT: store i32 [[TMP67]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK19-NEXT: [[TMP68:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK19-NEXT: [[TMP69:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK19-NEXT: [[TMP70:%.*]] = sext i32 [[TMP69]] to i64 -// CHECK19-NEXT: [[TMP71:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 0 -// 
CHECK19-NEXT: [[TMP72:%.*]] = bitcast i8** [[TMP71]] to i32* -// CHECK19-NEXT: store i32 [[TMP66]], i32* [[TMP72]], align 4 -// CHECK19-NEXT: [[TMP73:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP74:%.*]] = bitcast i8** [[TMP73]] to i32* -// CHECK19-NEXT: store i32 [[TMP66]], i32* [[TMP74]], align 4 -// CHECK19-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 0 -// CHECK19-NEXT: store i64 4, i64* [[TMP75]], align 4 -// CHECK19-NEXT: [[TMP76:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 0 -// CHECK19-NEXT: store i8* null, i8** [[TMP76]], align 4 -// CHECK19-NEXT: [[TMP77:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 1 -// CHECK19-NEXT: [[TMP78:%.*]] = bitcast i8** [[TMP77]] to i32* -// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP78]], align 4 -// CHECK19-NEXT: [[TMP79:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 1 -// CHECK19-NEXT: [[TMP80:%.*]] = bitcast i8** [[TMP79]] to i32* -// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP80]], align 4 -// CHECK19-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 1 -// CHECK19-NEXT: store i64 4, i64* [[TMP81]], align 4 -// CHECK19-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 1 +// CHECK19-NEXT: [[TMP43:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2 +// CHECK19-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32** +// CHECK19-NEXT: store i32* [[VLA]], i32** [[TMP44]], align 4 +// CHECK19-NEXT: [[TMP45:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 2 +// CHECK19-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32** +// CHECK19-NEXT: store i32* [[VLA]], i32** [[TMP46]], align 4 +// CHECK19-NEXT: store i64 [[TMP32]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 2), align 4 +// CHECK19-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 2 +// CHECK19-NEXT: store i8* null, i8** [[TMP47]], align 4 +// CHECK19-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP49:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP50:%.*]] = load i32, i32* [[N]], align 4 +// CHECK19-NEXT: store i32 [[TMP50]], i32* [[DOTCAPTURE_EXPR_8]], align 4 +// CHECK19-NEXT: [[TMP51:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_8]], align 4 +// CHECK19-NEXT: [[SUB10:%.*]] = sub nsw i32 [[TMP51]], 0 +// CHECK19-NEXT: [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1 +// CHECK19-NEXT: [[SUB12:%.*]] = sub nsw i32 [[DIV11]], 1 +// CHECK19-NEXT: store i32 [[SUB12]], i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK19-NEXT: [[TMP52:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK19-NEXT: [[ADD13:%.*]] = add nsw i32 [[TMP52]], 1 +// CHECK19-NEXT: [[TMP53:%.*]] = zext i32 [[ADD13]] to i64 +// CHECK19-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP53]]) +// CHECK19-NEXT: [[TMP54:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143.region_id, i32 3, i8** [[TMP48]], i8** [[TMP49]], i64* getelementptr inbounds ([3 x 
i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK19-NEXT: [[TMP55:%.*]] = icmp ne i32 [[TMP54]], 0 +// CHECK19-NEXT: br i1 [[TMP55]], label [[OMP_OFFLOAD_FAILED14:%.*]], label [[OMP_OFFLOAD_CONT15:%.*]] +// CHECK19: omp_offload.failed14: +// CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143(i32 [[TMP30]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] +// CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT15]] +// CHECK19: omp_offload.cont15: +// CHECK19-NEXT: [[TMP56:%.*]] = load i32, i32* [[M]], align 4 +// CHECK19-NEXT: store i32 [[TMP56]], i32* [[DOTCAPTURE_EXPR_16]], align 4 +// CHECK19-NEXT: [[TMP57:%.*]] = load i32, i32* [[N]], align 4 +// CHECK19-NEXT: store i32 [[TMP57]], i32* [[N_CASTED17]], align 4 +// CHECK19-NEXT: [[TMP58:%.*]] = load i32, i32* [[N_CASTED17]], align 4 +// CHECK19-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_16]], align 4 +// CHECK19-NEXT: store i32 [[TMP59]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 +// CHECK19-NEXT: [[TMP60:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 +// CHECK19-NEXT: [[TMP61:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK19-NEXT: [[TMP62:%.*]] = sext i32 [[TMP61]] to i64 +// CHECK19-NEXT: [[TMP63:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP64:%.*]] = bitcast i8** [[TMP63]] to i32* +// CHECK19-NEXT: store i32 [[TMP58]], i32* [[TMP64]], align 4 +// CHECK19-NEXT: [[TMP65:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP66:%.*]] = bitcast i8** [[TMP65]] to i32* +// CHECK19-NEXT: store i32 [[TMP58]], i32* [[TMP66]], align 4 +// CHECK19-NEXT: [[TMP67:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 0 +// CHECK19-NEXT: store i8* null, i8** [[TMP67]], align 4 +// CHECK19-NEXT: [[TMP68:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 1 +// CHECK19-NEXT: [[TMP69:%.*]] = bitcast i8** [[TMP68]] to i32* +// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP69]], align 4 +// CHECK19-NEXT: [[TMP70:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 1 +// CHECK19-NEXT: [[TMP71:%.*]] = bitcast i8** [[TMP70]] to i32* +// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP71]], align 4 +// CHECK19-NEXT: [[TMP72:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 1 +// CHECK19-NEXT: store i8* null, i8** [[TMP72]], align 4 +// CHECK19-NEXT: [[TMP73:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 2 +// CHECK19-NEXT: [[TMP74:%.*]] = bitcast i8** [[TMP73]] to i32** +// CHECK19-NEXT: store i32* [[VLA]], i32** [[TMP74]], align 4 +// CHECK19-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 2 +// CHECK19-NEXT: [[TMP76:%.*]] = bitcast i8** [[TMP75]] to i32** +// CHECK19-NEXT: store i32* [[VLA]], i32** [[TMP76]], align 4 +// CHECK19-NEXT: store i64 [[TMP62]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 2), align 4 +// CHECK19-NEXT: [[TMP77:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 2 +// CHECK19-NEXT: store i8* null, i8** [[TMP77]], align 4 +// CHECK19-NEXT: [[TMP78:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 3 +// 
CHECK19-NEXT: [[TMP79:%.*]] = bitcast i8** [[TMP78]] to i32* +// CHECK19-NEXT: store i32 [[TMP60]], i32* [[TMP79]], align 4 +// CHECK19-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 3 +// CHECK19-NEXT: [[TMP81:%.*]] = bitcast i8** [[TMP80]] to i32* +// CHECK19-NEXT: store i32 [[TMP60]], i32* [[TMP81]], align 4 +// CHECK19-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 3 // CHECK19-NEXT: store i8* null, i8** [[TMP82]], align 4 -// CHECK19-NEXT: [[TMP83:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 2 -// CHECK19-NEXT: [[TMP84:%.*]] = bitcast i8** [[TMP83]] to i32** -// CHECK19-NEXT: store i32* [[VLA]], i32** [[TMP84]], align 4 -// CHECK19-NEXT: [[TMP85:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 2 -// CHECK19-NEXT: [[TMP86:%.*]] = bitcast i8** [[TMP85]] to i32** -// CHECK19-NEXT: store i32* [[VLA]], i32** [[TMP86]], align 4 -// CHECK19-NEXT: [[TMP87:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 2 -// CHECK19-NEXT: store i64 [[TMP70]], i64* [[TMP87]], align 4 -// CHECK19-NEXT: [[TMP88:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 2 -// CHECK19-NEXT: store i8* null, i8** [[TMP88]], align 4 -// CHECK19-NEXT: [[TMP89:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 3 -// CHECK19-NEXT: [[TMP90:%.*]] = bitcast i8** [[TMP89]] to i32* -// CHECK19-NEXT: store i32 [[TMP68]], i32* [[TMP90]], align 4 -// CHECK19-NEXT: [[TMP91:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 3 -// CHECK19-NEXT: [[TMP92:%.*]] = bitcast i8** [[TMP91]] to i32* -// CHECK19-NEXT: store i32 [[TMP68]], i32* [[TMP92]], align 4 -// CHECK19-NEXT: [[TMP93:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 3 -// CHECK19-NEXT: store i64 4, i64* [[TMP93]], align 4 -// CHECK19-NEXT: [[TMP94:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 3 -// CHECK19-NEXT: store i8* null, i8** [[TMP94]], align 4 -// CHECK19-NEXT: [[TMP95:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP96:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP97:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP98:%.*]] = load i32, i32* [[N]], align 4 -// CHECK19-NEXT: store i32 [[TMP98]], i32* [[DOTCAPTURE_EXPR_24]], align 4 -// CHECK19-NEXT: [[TMP99:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_24]], align 4 -// CHECK19-NEXT: [[SUB26:%.*]] = sub nsw i32 [[TMP99]], 0 -// CHECK19-NEXT: [[DIV27:%.*]] = sdiv i32 [[SUB26]], 1 -// CHECK19-NEXT: [[SUB28:%.*]] = sub nsw i32 [[DIV27]], 1 -// CHECK19-NEXT: store i32 [[SUB28]], i32* [[DOTCAPTURE_EXPR_25]], align 4 -// CHECK19-NEXT: [[TMP100:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_25]], align 4 -// CHECK19-NEXT: [[ADD29:%.*]] = add nsw i32 [[TMP100]], 1 -// CHECK19-NEXT: [[TMP101:%.*]] = zext i32 [[ADD29]] to i64 -// CHECK19-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP101]]) -// CHECK19-NEXT: [[TMP102:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147.region_id, i32 4, 
i8** [[TMP95]], i8** [[TMP96]], i64* [[TMP97]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.7, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK19-NEXT: [[TMP103:%.*]] = icmp ne i32 [[TMP102]], 0 -// CHECK19-NEXT: br i1 [[TMP103]], label [[OMP_OFFLOAD_FAILED30:%.*]], label [[OMP_OFFLOAD_CONT31:%.*]] -// CHECK19: omp_offload.failed30: -// CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147(i32 [[TMP66]], i32 [[TMP0]], i32* [[VLA]], i32 [[TMP68]]) #[[ATTR3]] -// CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT31]] -// CHECK19: omp_offload.cont31: -// CHECK19-NEXT: [[TMP104:%.*]] = load i32, i32* [[N]], align 4 -// CHECK19-NEXT: store i32 [[TMP104]], i32* [[N_CASTED32]], align 4 -// CHECK19-NEXT: [[TMP105:%.*]] = load i32, i32* [[N_CASTED32]], align 4 -// CHECK19-NEXT: [[TMP106:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK19-NEXT: [[TMP107:%.*]] = sext i32 [[TMP106]] to i64 -// CHECK19-NEXT: [[TMP108:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP109:%.*]] = bitcast i8** [[TMP108]] to i32* -// CHECK19-NEXT: store i32 [[TMP105]], i32* [[TMP109]], align 4 -// CHECK19-NEXT: [[TMP110:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP111:%.*]] = bitcast i8** [[TMP110]] to i32* -// CHECK19-NEXT: store i32 [[TMP105]], i32* [[TMP111]], align 4 -// CHECK19-NEXT: [[TMP112:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES36]], i32 0, i32 0 -// CHECK19-NEXT: store i64 4, i64* [[TMP112]], align 4 -// CHECK19-NEXT: [[TMP113:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS35]], i32 0, i32 0 -// CHECK19-NEXT: store i8* null, i8** [[TMP113]], align 4 -// CHECK19-NEXT: [[TMP114:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 1 -// CHECK19-NEXT: [[TMP115:%.*]] = bitcast i8** [[TMP114]] to i32* -// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP115]], align 4 -// CHECK19-NEXT: [[TMP116:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 1 -// CHECK19-NEXT: [[TMP117:%.*]] = bitcast i8** [[TMP116]] to i32* -// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP117]], align 4 -// CHECK19-NEXT: [[TMP118:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES36]], i32 0, i32 1 -// CHECK19-NEXT: store i64 4, i64* [[TMP118]], align 4 -// CHECK19-NEXT: [[TMP119:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS35]], i32 0, i32 1 -// CHECK19-NEXT: store i8* null, i8** [[TMP119]], align 4 -// CHECK19-NEXT: [[TMP120:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 2 -// CHECK19-NEXT: [[TMP121:%.*]] = bitcast i8** [[TMP120]] to i32** -// CHECK19-NEXT: store i32* [[VLA]], i32** [[TMP121]], align 4 -// CHECK19-NEXT: [[TMP122:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 2 -// CHECK19-NEXT: [[TMP123:%.*]] = bitcast i8** [[TMP122]] to i32** -// CHECK19-NEXT: store i32* [[VLA]], i32** [[TMP123]], align 4 -// CHECK19-NEXT: [[TMP124:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES36]], i32 0, i32 2 -// CHECK19-NEXT: store i64 [[TMP107]], i64* [[TMP124]], align 4 -// CHECK19-NEXT: [[TMP125:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS35]], i32 0, i32 2 -// CHECK19-NEXT: store i8* null, i8** [[TMP125]], align 4 -// CHECK19-NEXT: [[TMP126:%.*]] = getelementptr inbounds [3 x 
i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP127:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP128:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES36]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP129:%.*]] = load i32, i32* [[N]], align 4 -// CHECK19-NEXT: store i32 [[TMP129]], i32* [[DOTCAPTURE_EXPR_38]], align 4 -// CHECK19-NEXT: [[TMP130:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_38]], align 4 -// CHECK19-NEXT: [[SUB40:%.*]] = sub nsw i32 [[TMP130]], 0 -// CHECK19-NEXT: [[DIV41:%.*]] = sdiv i32 [[SUB40]], 1 -// CHECK19-NEXT: [[SUB42:%.*]] = sub nsw i32 [[DIV41]], 1 -// CHECK19-NEXT: store i32 [[SUB42]], i32* [[DOTCAPTURE_EXPR_39]], align 4 -// CHECK19-NEXT: [[TMP131:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_39]], align 4 -// CHECK19-NEXT: [[ADD43:%.*]] = add nsw i32 [[TMP131]], 1 -// CHECK19-NEXT: [[TMP132:%.*]] = zext i32 [[ADD43]] to i64 -// CHECK19-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP132]]) -// CHECK19-NEXT: [[TMP133:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151.region_id, i32 3, i8** [[TMP126]], i8** [[TMP127]], i64* [[TMP128]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK19-NEXT: [[TMP134:%.*]] = icmp ne i32 [[TMP133]], 0 -// CHECK19-NEXT: br i1 [[TMP134]], label [[OMP_OFFLOAD_FAILED44:%.*]], label [[OMP_OFFLOAD_CONT45:%.*]] -// CHECK19: omp_offload.failed44: -// CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151(i32 [[TMP105]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] -// CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT45]] -// CHECK19: omp_offload.cont45: -// CHECK19-NEXT: [[TMP135:%.*]] = load i32, i32* [[M]], align 4 -// CHECK19-NEXT: store i32 [[TMP135]], i32* [[DOTCAPTURE_EXPR_46]], align 4 -// CHECK19-NEXT: [[TMP136:%.*]] = load i32, i32* [[N]], align 4 -// CHECK19-NEXT: store i32 [[TMP136]], i32* [[N_CASTED47]], align 4 -// CHECK19-NEXT: [[TMP137:%.*]] = load i32, i32* [[N_CASTED47]], align 4 -// CHECK19-NEXT: [[TMP138:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_46]], align 4 -// CHECK19-NEXT: store i32 [[TMP138]], i32* [[DOTCAPTURE_EXPR__CASTED48]], align 4 -// CHECK19-NEXT: [[TMP139:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED48]], align 4 -// CHECK19-NEXT: [[TMP140:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK19-NEXT: [[TMP141:%.*]] = sext i32 [[TMP140]] to i64 -// CHECK19-NEXT: [[TMP142:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS49]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP83:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP84:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP85:%.*]] = load i32, i32* [[N]], align 4 +// CHECK19-NEXT: store i32 [[TMP85]], i32* [[DOTCAPTURE_EXPR_22]], align 4 +// CHECK19-NEXT: [[TMP86:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_22]], align 4 +// CHECK19-NEXT: [[SUB24:%.*]] = sub nsw i32 [[TMP86]], 0 +// CHECK19-NEXT: [[DIV25:%.*]] = sdiv i32 [[SUB24]], 1 +// CHECK19-NEXT: [[SUB26:%.*]] = sub nsw i32 [[DIV25]], 1 +// CHECK19-NEXT: store i32 [[SUB26]], i32* [[DOTCAPTURE_EXPR_23]], align 4 +// CHECK19-NEXT: [[TMP87:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_23]], align 4 +// CHECK19-NEXT: [[ADD27:%.*]] = add nsw i32 [[TMP87]], 1 
+// CHECK19-NEXT: [[TMP88:%.*]] = zext i32 [[ADD27]] to i64 +// CHECK19-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP88]]) +// CHECK19-NEXT: [[TMP89:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147.region_id, i32 4, i8** [[TMP83]], i8** [[TMP84]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK19-NEXT: [[TMP90:%.*]] = icmp ne i32 [[TMP89]], 0 +// CHECK19-NEXT: br i1 [[TMP90]], label [[OMP_OFFLOAD_FAILED28:%.*]], label [[OMP_OFFLOAD_CONT29:%.*]] +// CHECK19: omp_offload.failed28: +// CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147(i32 [[TMP58]], i32 [[TMP0]], i32* [[VLA]], i32 [[TMP60]]) #[[ATTR3]] +// CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT29]] +// CHECK19: omp_offload.cont29: +// CHECK19-NEXT: [[TMP91:%.*]] = load i32, i32* [[N]], align 4 +// CHECK19-NEXT: store i32 [[TMP91]], i32* [[N_CASTED30]], align 4 +// CHECK19-NEXT: [[TMP92:%.*]] = load i32, i32* [[N_CASTED30]], align 4 +// CHECK19-NEXT: [[TMP93:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK19-NEXT: [[TMP94:%.*]] = sext i32 [[TMP93]] to i64 +// CHECK19-NEXT: [[TMP95:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS31]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP96:%.*]] = bitcast i8** [[TMP95]] to i32* +// CHECK19-NEXT: store i32 [[TMP92]], i32* [[TMP96]], align 4 +// CHECK19-NEXT: [[TMP97:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS32]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP98:%.*]] = bitcast i8** [[TMP97]] to i32* +// CHECK19-NEXT: store i32 [[TMP92]], i32* [[TMP98]], align 4 +// CHECK19-NEXT: [[TMP99:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS33]], i32 0, i32 0 +// CHECK19-NEXT: store i8* null, i8** [[TMP99]], align 4 +// CHECK19-NEXT: [[TMP100:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS31]], i32 0, i32 1 +// CHECK19-NEXT: [[TMP101:%.*]] = bitcast i8** [[TMP100]] to i32* +// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP101]], align 4 +// CHECK19-NEXT: [[TMP102:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS32]], i32 0, i32 1 +// CHECK19-NEXT: [[TMP103:%.*]] = bitcast i8** [[TMP102]] to i32* +// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP103]], align 4 +// CHECK19-NEXT: [[TMP104:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS33]], i32 0, i32 1 +// CHECK19-NEXT: store i8* null, i8** [[TMP104]], align 4 +// CHECK19-NEXT: [[TMP105:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS31]], i32 0, i32 2 +// CHECK19-NEXT: [[TMP106:%.*]] = bitcast i8** [[TMP105]] to i32** +// CHECK19-NEXT: store i32* [[VLA]], i32** [[TMP106]], align 4 +// CHECK19-NEXT: [[TMP107:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS32]], i32 0, i32 2 +// CHECK19-NEXT: [[TMP108:%.*]] = bitcast i8** [[TMP107]] to i32** +// CHECK19-NEXT: store i32* [[VLA]], i32** [[TMP108]], align 4 +// CHECK19-NEXT: store i64 [[TMP94]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.12, i32 0, i32 2), align 4 +// CHECK19-NEXT: [[TMP109:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS33]], i32 0, i32 2 +// CHECK19-NEXT: store i8* null, i8** [[TMP109]], align 4 +// CHECK19-NEXT: [[TMP110:%.*]] = getelementptr 
inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS31]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP111:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS32]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP112:%.*]] = load i32, i32* [[N]], align 4 +// CHECK19-NEXT: store i32 [[TMP112]], i32* [[DOTCAPTURE_EXPR_35]], align 4 +// CHECK19-NEXT: [[TMP113:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_35]], align 4 +// CHECK19-NEXT: [[SUB37:%.*]] = sub nsw i32 [[TMP113]], 0 +// CHECK19-NEXT: [[DIV38:%.*]] = sdiv i32 [[SUB37]], 1 +// CHECK19-NEXT: [[SUB39:%.*]] = sub nsw i32 [[DIV38]], 1 +// CHECK19-NEXT: store i32 [[SUB39]], i32* [[DOTCAPTURE_EXPR_36]], align 4 +// CHECK19-NEXT: [[TMP114:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_36]], align 4 +// CHECK19-NEXT: [[ADD40:%.*]] = add nsw i32 [[TMP114]], 1 +// CHECK19-NEXT: [[TMP115:%.*]] = zext i32 [[ADD40]] to i64 +// CHECK19-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP115]]) +// CHECK19-NEXT: [[TMP116:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151.region_id, i32 3, i8** [[TMP110]], i8** [[TMP111]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK19-NEXT: [[TMP117:%.*]] = icmp ne i32 [[TMP116]], 0 +// CHECK19-NEXT: br i1 [[TMP117]], label [[OMP_OFFLOAD_FAILED41:%.*]], label [[OMP_OFFLOAD_CONT42:%.*]] +// CHECK19: omp_offload.failed41: +// CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151(i32 [[TMP92]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] +// CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT42]] +// CHECK19: omp_offload.cont42: +// CHECK19-NEXT: [[TMP118:%.*]] = load i32, i32* [[M]], align 4 +// CHECK19-NEXT: store i32 [[TMP118]], i32* [[DOTCAPTURE_EXPR_43]], align 4 +// CHECK19-NEXT: [[TMP119:%.*]] = load i32, i32* [[N]], align 4 +// CHECK19-NEXT: store i32 [[TMP119]], i32* [[N_CASTED44]], align 4 +// CHECK19-NEXT: [[TMP120:%.*]] = load i32, i32* [[N_CASTED44]], align 4 +// CHECK19-NEXT: [[TMP121:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_43]], align 4 +// CHECK19-NEXT: store i32 [[TMP121]], i32* [[DOTCAPTURE_EXPR__CASTED45]], align 4 +// CHECK19-NEXT: [[TMP122:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED45]], align 4 +// CHECK19-NEXT: [[TMP123:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK19-NEXT: [[TMP124:%.*]] = sext i32 [[TMP123]] to i64 +// CHECK19-NEXT: [[TMP125:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS46]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP126:%.*]] = bitcast i8** [[TMP125]] to i32* +// CHECK19-NEXT: store i32 [[TMP120]], i32* [[TMP126]], align 4 +// CHECK19-NEXT: [[TMP127:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS47]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP128:%.*]] = bitcast i8** [[TMP127]] to i32* +// CHECK19-NEXT: store i32 [[TMP120]], i32* [[TMP128]], align 4 +// CHECK19-NEXT: [[TMP129:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS48]], i32 0, i32 0 +// CHECK19-NEXT: store i8* null, i8** [[TMP129]], align 4 +// CHECK19-NEXT: [[TMP130:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS46]], i32 0, i32 1 +// CHECK19-NEXT: [[TMP131:%.*]] = bitcast i8** [[TMP130]] to i32* +// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP131]], align 4 +// CHECK19-NEXT: [[TMP132:%.*]] = getelementptr inbounds [4 x i8*], [4 
x i8*]* [[DOTOFFLOAD_PTRS47]], i32 0, i32 1 +// CHECK19-NEXT: [[TMP133:%.*]] = bitcast i8** [[TMP132]] to i32* +// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP133]], align 4 +// CHECK19-NEXT: [[TMP134:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS48]], i32 0, i32 1 +// CHECK19-NEXT: store i8* null, i8** [[TMP134]], align 4 +// CHECK19-NEXT: [[TMP135:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS46]], i32 0, i32 2 +// CHECK19-NEXT: [[TMP136:%.*]] = bitcast i8** [[TMP135]] to i32** +// CHECK19-NEXT: store i32* [[VLA]], i32** [[TMP136]], align 4 +// CHECK19-NEXT: [[TMP137:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS47]], i32 0, i32 2 +// CHECK19-NEXT: [[TMP138:%.*]] = bitcast i8** [[TMP137]] to i32** +// CHECK19-NEXT: store i32* [[VLA]], i32** [[TMP138]], align 4 +// CHECK19-NEXT: store i64 [[TMP124]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.16, i32 0, i32 2), align 4 +// CHECK19-NEXT: [[TMP139:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS48]], i32 0, i32 2 +// CHECK19-NEXT: store i8* null, i8** [[TMP139]], align 4 +// CHECK19-NEXT: [[TMP140:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS46]], i32 0, i32 3 +// CHECK19-NEXT: [[TMP141:%.*]] = bitcast i8** [[TMP140]] to i32* +// CHECK19-NEXT: store i32 [[TMP122]], i32* [[TMP141]], align 4 +// CHECK19-NEXT: [[TMP142:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS47]], i32 0, i32 3 // CHECK19-NEXT: [[TMP143:%.*]] = bitcast i8** [[TMP142]] to i32* -// CHECK19-NEXT: store i32 [[TMP137]], i32* [[TMP143]], align 4 -// CHECK19-NEXT: [[TMP144:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS50]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP145:%.*]] = bitcast i8** [[TMP144]] to i32* -// CHECK19-NEXT: store i32 [[TMP137]], i32* [[TMP145]], align 4 -// CHECK19-NEXT: [[TMP146:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES52]], i32 0, i32 0 -// CHECK19-NEXT: store i64 4, i64* [[TMP146]], align 4 -// CHECK19-NEXT: [[TMP147:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS51]], i32 0, i32 0 -// CHECK19-NEXT: store i8* null, i8** [[TMP147]], align 4 -// CHECK19-NEXT: [[TMP148:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS49]], i32 0, i32 1 -// CHECK19-NEXT: [[TMP149:%.*]] = bitcast i8** [[TMP148]] to i32* -// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP149]], align 4 -// CHECK19-NEXT: [[TMP150:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS50]], i32 0, i32 1 -// CHECK19-NEXT: [[TMP151:%.*]] = bitcast i8** [[TMP150]] to i32* -// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP151]], align 4 -// CHECK19-NEXT: [[TMP152:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES52]], i32 0, i32 1 -// CHECK19-NEXT: store i64 4, i64* [[TMP152]], align 4 -// CHECK19-NEXT: [[TMP153:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS51]], i32 0, i32 1 -// CHECK19-NEXT: store i8* null, i8** [[TMP153]], align 4 -// CHECK19-NEXT: [[TMP154:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS49]], i32 0, i32 2 -// CHECK19-NEXT: [[TMP155:%.*]] = bitcast i8** [[TMP154]] to i32** -// CHECK19-NEXT: store i32* [[VLA]], i32** [[TMP155]], align 4 -// CHECK19-NEXT: [[TMP156:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS50]], i32 0, i32 2 -// CHECK19-NEXT: [[TMP157:%.*]] = bitcast i8** [[TMP156]] to i32** -// 
CHECK19-NEXT: store i32* [[VLA]], i32** [[TMP157]], align 4 -// CHECK19-NEXT: [[TMP158:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES52]], i32 0, i32 2 -// CHECK19-NEXT: store i64 [[TMP141]], i64* [[TMP158]], align 4 -// CHECK19-NEXT: [[TMP159:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS51]], i32 0, i32 2 -// CHECK19-NEXT: store i8* null, i8** [[TMP159]], align 4 -// CHECK19-NEXT: [[TMP160:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS49]], i32 0, i32 3 -// CHECK19-NEXT: [[TMP161:%.*]] = bitcast i8** [[TMP160]] to i32* -// CHECK19-NEXT: store i32 [[TMP139]], i32* [[TMP161]], align 4 -// CHECK19-NEXT: [[TMP162:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS50]], i32 0, i32 3 -// CHECK19-NEXT: [[TMP163:%.*]] = bitcast i8** [[TMP162]] to i32* -// CHECK19-NEXT: store i32 [[TMP139]], i32* [[TMP163]], align 4 -// CHECK19-NEXT: [[TMP164:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES52]], i32 0, i32 3 -// CHECK19-NEXT: store i64 4, i64* [[TMP164]], align 4 -// CHECK19-NEXT: [[TMP165:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS51]], i32 0, i32 3 -// CHECK19-NEXT: store i8* null, i8** [[TMP165]], align 4 -// CHECK19-NEXT: [[TMP166:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS49]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP167:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS50]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP168:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES52]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP169:%.*]] = load i32, i32* [[N]], align 4 -// CHECK19-NEXT: store i32 [[TMP169]], i32* [[DOTCAPTURE_EXPR_54]], align 4 -// CHECK19-NEXT: [[TMP170:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_54]], align 4 -// CHECK19-NEXT: [[SUB56:%.*]] = sub nsw i32 [[TMP170]], 0 -// CHECK19-NEXT: [[DIV57:%.*]] = sdiv i32 [[SUB56]], 1 -// CHECK19-NEXT: [[SUB58:%.*]] = sub nsw i32 [[DIV57]], 1 -// CHECK19-NEXT: store i32 [[SUB58]], i32* [[DOTCAPTURE_EXPR_55]], align 4 -// CHECK19-NEXT: [[TMP171:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_55]], align 4 -// CHECK19-NEXT: [[ADD59:%.*]] = add nsw i32 [[TMP171]], 1 -// CHECK19-NEXT: [[TMP172:%.*]] = zext i32 [[ADD59]] to i64 -// CHECK19-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP172]]) -// CHECK19-NEXT: [[TMP173:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155.region_id, i32 4, i8** [[TMP166]], i8** [[TMP167]], i64* [[TMP168]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK19-NEXT: [[TMP174:%.*]] = icmp ne i32 [[TMP173]], 0 -// CHECK19-NEXT: br i1 [[TMP174]], label [[OMP_OFFLOAD_FAILED60:%.*]], label [[OMP_OFFLOAD_CONT61:%.*]] -// CHECK19: omp_offload.failed60: -// CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155(i32 [[TMP137]], i32 [[TMP0]], i32* [[VLA]], i32 [[TMP139]]) #[[ATTR3]] -// CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT61]] -// CHECK19: omp_offload.cont61: -// CHECK19-NEXT: [[TMP175:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 -// CHECK19-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP175]]) +// CHECK19-NEXT: store i32 [[TMP122]], i32* [[TMP143]], align 4 +// CHECK19-NEXT: [[TMP144:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS48]], 
i32 0, i32 3 +// CHECK19-NEXT: store i8* null, i8** [[TMP144]], align 4 +// CHECK19-NEXT: [[TMP145:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS46]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP146:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS47]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP147:%.*]] = load i32, i32* [[N]], align 4 +// CHECK19-NEXT: store i32 [[TMP147]], i32* [[DOTCAPTURE_EXPR_50]], align 4 +// CHECK19-NEXT: [[TMP148:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_50]], align 4 +// CHECK19-NEXT: [[SUB52:%.*]] = sub nsw i32 [[TMP148]], 0 +// CHECK19-NEXT: [[DIV53:%.*]] = sdiv i32 [[SUB52]], 1 +// CHECK19-NEXT: [[SUB54:%.*]] = sub nsw i32 [[DIV53]], 1 +// CHECK19-NEXT: store i32 [[SUB54]], i32* [[DOTCAPTURE_EXPR_51]], align 4 +// CHECK19-NEXT: [[TMP149:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_51]], align 4 +// CHECK19-NEXT: [[ADD55:%.*]] = add nsw i32 [[TMP149]], 1 +// CHECK19-NEXT: [[TMP150:%.*]] = zext i32 [[ADD55]] to i64 +// CHECK19-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP150]]) +// CHECK19-NEXT: [[TMP151:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155.region_id, i32 4, i8** [[TMP145]], i8** [[TMP146]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.16, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK19-NEXT: [[TMP152:%.*]] = icmp ne i32 [[TMP151]], 0 +// CHECK19-NEXT: br i1 [[TMP152]], label [[OMP_OFFLOAD_FAILED56:%.*]], label [[OMP_OFFLOAD_CONT57:%.*]] +// CHECK19: omp_offload.failed56: +// CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155(i32 [[TMP120]], i32 [[TMP0]], i32* [[VLA]], i32 [[TMP122]]) #[[ATTR3]] +// CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT57]] +// CHECK19: omp_offload.cont57: +// CHECK19-NEXT: [[TMP153:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 +// CHECK19-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP153]]) // CHECK19-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK19-NEXT: [[TMP176:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK19-NEXT: call void @llvm.stackrestore(i8* [[TMP176]]) -// CHECK19-NEXT: [[TMP177:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK19-NEXT: ret i32 [[TMP177]] +// CHECK19-NEXT: [[TMP154:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK19-NEXT: call void @llvm.stackrestore(i8* [[TMP154]]) +// CHECK19-NEXT: [[TMP155:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK19-NEXT: ret i32 [[TMP155]] // // // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139 @@ -23136,11 +22863,11 @@ // CHECK19-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK19-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK19-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP5]]) +// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP5]]) // CHECK19-NEXT: ret void // // -// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..5 +// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..6 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -23221,7 +22948,7 @@ // CHECK19-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK19-NEXT: store i32 [[TMP20]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK19-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*, i32)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32 [[TMP16]], i32 [[TMP17]], i32 [[TMP19]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP21]]) +// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*, i32)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i32 [[TMP16]], i32 [[TMP17]], i32 [[TMP19]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP21]]) // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK19: omp.inner.for.inc: // CHECK19-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -23263,7 +22990,7 @@ // CHECK19-NEXT: ret void // // -// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..6 +// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..7 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32 noundef [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -23381,11 +23108,11 @@ // CHECK19-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4 // CHECK19-NEXT: store i32 [[TMP2]], i32* [[N_CASTED]], align 4 // CHECK19-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4 -// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*)* @.omp_outlined..8 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]]) +// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]]) // CHECK19-NEXT: ret void // // -// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..8 +// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..10 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -23458,7 +23185,7 @@ // CHECK19-NEXT: [[TMP17:%.*]] = load i32, i32* [[N_ADDR]], align 4 // CHECK19-NEXT: store i32 [[TMP17]], i32* [[N_CASTED]], align 4 // CHECK19-NEXT: [[TMP18:%.*]] = load i32, i32* [[N_CASTED]], align 4 -// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), i32 [[TMP15]], i32 [[TMP16]], i32 [[TMP18]], i32 [[TMP0]], i32* [[TMP1]]) +// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP15]], i32 [[TMP16]], i32 [[TMP18]], i32 [[TMP0]], i32* [[TMP1]]) // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK19: omp.inner.for.inc: // CHECK19-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -23477,7 +23204,7 @@ // CHECK19-NEXT: ret void // // -// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..9 +// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..11 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32 noundef [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -23595,11 +23322,11 @@ // CHECK19-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK19-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK19-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP5]]) +// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP5]]) // CHECK19-NEXT: ret void // // -// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..14 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -23678,7 +23405,7 @@ // CHECK19-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK19-NEXT: store i32 [[TMP19]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK19-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*, i32)* @.omp_outlined..12 to void (i32*, i32*, ...)*), i32 [[TMP15]], i32 [[TMP16]], i32 [[TMP18]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP20]]) +// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*, i32)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i32 [[TMP15]], i32 [[TMP16]], i32 [[TMP18]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP20]]) // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK19: omp.inner.for.inc: // CHECK19-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -23697,7 +23424,7 @@ // CHECK19-NEXT: ret void // // -// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..12 +// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..15 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32 noundef [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -23840,7 +23567,7 @@ // CHECK19-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK19-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK19-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK19-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l112.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK19-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l112.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.20, 
i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.21, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK19-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK19-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK19: omp_offload.failed: @@ -23858,7 +23585,7 @@ // CHECK19-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0 // CHECK19-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0 // CHECK19-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK19-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.19, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.20, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK19-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.24, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.25, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK19-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 // CHECK19-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]] // CHECK19: omp_offload.failed5: @@ -23889,7 +23616,7 @@ // CHECK19-NEXT: [[TMP31:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0 // CHECK19-NEXT: [[TMP32:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0 // CHECK19-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK19-NEXT: [[TMP33:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l120.region_id, i32 2, i8** [[TMP31]], i8** [[TMP32]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.23, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.24, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK19-NEXT: [[TMP33:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l120.region_id, i32 2, i8** [[TMP31]], i8** [[TMP32]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.28, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.29, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK19-NEXT: [[TMP34:%.*]] = icmp ne i32 [[TMP33]], 0 // CHECK19-NEXT: br i1 [[TMP34]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]] // CHECK19: omp_offload.failed11: @@ -23907,7 +23634,7 @@ // CHECK19-NEXT: [[TMP40:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 0 // CHECK19-NEXT: [[TMP41:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 0 // CHECK19-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 
-1, i64 10) -// CHECK19-NEXT: [[TMP42:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l124.region_id, i32 1, i8** [[TMP40]], i8** [[TMP41]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.27, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.28, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK19-NEXT: [[TMP42:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l124.region_id, i32 1, i8** [[TMP40]], i8** [[TMP41]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.32, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.33, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK19-NEXT: [[TMP43:%.*]] = icmp ne i32 [[TMP42]], 0 // CHECK19-NEXT: br i1 [[TMP43]], label [[OMP_OFFLOAD_FAILED17:%.*]], label [[OMP_OFFLOAD_CONT18:%.*]] // CHECK19: omp_offload.failed17: @@ -23938,7 +23665,7 @@ // CHECK19-NEXT: [[TMP57:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0 // CHECK19-NEXT: [[TMP58:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0 // CHECK19-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK19-NEXT: [[TMP59:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l128.region_id, i32 2, i8** [[TMP57]], i8** [[TMP58]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.31, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.32, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK19-NEXT: [[TMP59:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l128.region_id, i32 2, i8** [[TMP57]], i8** [[TMP58]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.36, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.37, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK19-NEXT: [[TMP60:%.*]] = icmp ne i32 [[TMP59]], 0 // CHECK19-NEXT: br i1 [[TMP60]], label [[OMP_OFFLOAD_FAILED25:%.*]], label [[OMP_OFFLOAD_CONT26:%.*]] // CHECK19: omp_offload.failed25: @@ -23954,11 +23681,11 @@ // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK19-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 // CHECK19-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 -// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK19-NEXT: ret void // // -// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..14 +// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..18 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -24004,7 +23731,7 @@ // CHECK19: omp.inner.for.body: // CHECK19-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 // CHECK19-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 -// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]) +// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..19 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]) // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK19: omp.inner.for.inc: // CHECK19-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -24019,7 +23746,7 @@ // CHECK19-NEXT: ret void // // -// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..15 +// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..19 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -24099,11 +23826,11 @@ // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK19-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 // CHECK19-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 -// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..17 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..22 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK19-NEXT: ret void // // -// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..17 +// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..22 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -24149,7 +23876,7 @@ // CHECK19: omp.inner.for.body: // CHECK19-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 // CHECK19-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 -// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]) +// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..23 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]) // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK19: omp.inner.for.inc: // CHECK19-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -24164,7 +23891,7 @@ // CHECK19-NEXT: ret void // // -// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..18 +// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..23 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -24250,11 +23977,11 @@ // CHECK19-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK19-NEXT: store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK19-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..21 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP2]]) +// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..26 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP2]]) // CHECK19-NEXT: ret void // // -// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..21 +// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..26 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -24306,7 +24033,7 @@ // CHECK19-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK19-NEXT: store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK19-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..22 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]]) +// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..27 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]]) // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK19: omp.inner.for.inc: // CHECK19-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -24321,7 +24048,7 @@ // CHECK19-NEXT: ret void // // -// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..22 +// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..27 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -24423,11 +24150,11 @@ // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK19-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 // CHECK19-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 -// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..25 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..30 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK19-NEXT: ret void // // -// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..25 +// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..30 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -24473,7 +24200,7 @@ // CHECK19: omp.inner.for.body: // CHECK19-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 // CHECK19-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 -// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..26 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]) +// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..31 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]) // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK19: omp.inner.for.inc: // CHECK19-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -24488,7 +24215,7 @@ // CHECK19-NEXT: ret void // // -// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..26 +// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..31 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -24572,11 +24299,11 @@ // CHECK19-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK19-NEXT: store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK19-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..29 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP2]]) +// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..34 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP2]]) // CHECK19-NEXT: ret void // // -// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..29 +// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..34 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -24628,7 +24355,7 @@ // CHECK19-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK19-NEXT: store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK19-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..30 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]]) +// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..35 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]]) // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK19: omp.inner.for.inc: // CHECK19-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -24643,7 +24370,7 @@ // CHECK19-NEXT: ret void // // -// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..30 +// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..35 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -24739,7 +24466,6 @@ // CHECK20-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK20-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK20-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4 -// CHECK20-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 4 // CHECK20-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK20-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK20-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -24747,38 +24473,34 @@ // CHECK20-NEXT: [[DOTOFFLOAD_BASEPTRS4:%.*]] = alloca [3 x i8*], align 4 // CHECK20-NEXT: [[DOTOFFLOAD_PTRS5:%.*]] = alloca [3 x i8*], align 4 // CHECK20-NEXT: [[DOTOFFLOAD_MAPPERS6:%.*]] = alloca [3 x i8*], align 4 -// CHECK20-NEXT: [[DOTOFFLOAD_SIZES7:%.*]] = alloca [3 x i64], align 4 -// CHECK20-NEXT: [[_TMP8:%.*]] = alloca i32, align 4 +// CHECK20-NEXT: [[_TMP7:%.*]] = alloca i32, align 4 +// CHECK20-NEXT: [[DOTCAPTURE_EXPR_8:%.*]] = alloca i32, align 4 // CHECK20-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4 -// CHECK20-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4 -// CHECK20-NEXT: [[DOTCAPTURE_EXPR_17:%.*]] = alloca i32, align 4 -// CHECK20-NEXT: [[N_CASTED18:%.*]] = alloca i32, align 4 +// 
CHECK20-NEXT: [[DOTCAPTURE_EXPR_16:%.*]] = alloca i32, align 4 +// CHECK20-NEXT: [[N_CASTED17:%.*]] = alloca i32, align 4 // CHECK20-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4 -// CHECK20-NEXT: [[DOTOFFLOAD_BASEPTRS19:%.*]] = alloca [4 x i8*], align 4 -// CHECK20-NEXT: [[DOTOFFLOAD_PTRS20:%.*]] = alloca [4 x i8*], align 4 -// CHECK20-NEXT: [[DOTOFFLOAD_MAPPERS21:%.*]] = alloca [4 x i8*], align 4 -// CHECK20-NEXT: [[DOTOFFLOAD_SIZES22:%.*]] = alloca [4 x i64], align 4 -// CHECK20-NEXT: [[_TMP23:%.*]] = alloca i32, align 4 -// CHECK20-NEXT: [[DOTCAPTURE_EXPR_24:%.*]] = alloca i32, align 4 -// CHECK20-NEXT: [[DOTCAPTURE_EXPR_25:%.*]] = alloca i32, align 4 -// CHECK20-NEXT: [[N_CASTED32:%.*]] = alloca i32, align 4 -// CHECK20-NEXT: [[DOTOFFLOAD_BASEPTRS33:%.*]] = alloca [3 x i8*], align 4 -// CHECK20-NEXT: [[DOTOFFLOAD_PTRS34:%.*]] = alloca [3 x i8*], align 4 -// CHECK20-NEXT: [[DOTOFFLOAD_MAPPERS35:%.*]] = alloca [3 x i8*], align 4 -// CHECK20-NEXT: [[DOTOFFLOAD_SIZES36:%.*]] = alloca [3 x i64], align 4 -// CHECK20-NEXT: [[_TMP37:%.*]] = alloca i32, align 4 -// CHECK20-NEXT: [[DOTCAPTURE_EXPR_38:%.*]] = alloca i32, align 4 -// CHECK20-NEXT: [[DOTCAPTURE_EXPR_39:%.*]] = alloca i32, align 4 -// CHECK20-NEXT: [[DOTCAPTURE_EXPR_46:%.*]] = alloca i32, align 4 -// CHECK20-NEXT: [[N_CASTED47:%.*]] = alloca i32, align 4 -// CHECK20-NEXT: [[DOTCAPTURE_EXPR__CASTED48:%.*]] = alloca i32, align 4 -// CHECK20-NEXT: [[DOTOFFLOAD_BASEPTRS49:%.*]] = alloca [4 x i8*], align 4 -// CHECK20-NEXT: [[DOTOFFLOAD_PTRS50:%.*]] = alloca [4 x i8*], align 4 -// CHECK20-NEXT: [[DOTOFFLOAD_MAPPERS51:%.*]] = alloca [4 x i8*], align 4 -// CHECK20-NEXT: [[DOTOFFLOAD_SIZES52:%.*]] = alloca [4 x i64], align 4 -// CHECK20-NEXT: [[_TMP53:%.*]] = alloca i32, align 4 -// CHECK20-NEXT: [[DOTCAPTURE_EXPR_54:%.*]] = alloca i32, align 4 -// CHECK20-NEXT: [[DOTCAPTURE_EXPR_55:%.*]] = alloca i32, align 4 +// CHECK20-NEXT: [[DOTOFFLOAD_BASEPTRS18:%.*]] = alloca [4 x i8*], align 4 +// CHECK20-NEXT: [[DOTOFFLOAD_PTRS19:%.*]] = alloca [4 x i8*], align 4 +// CHECK20-NEXT: [[DOTOFFLOAD_MAPPERS20:%.*]] = alloca [4 x i8*], align 4 +// CHECK20-NEXT: [[_TMP21:%.*]] = alloca i32, align 4 +// CHECK20-NEXT: [[DOTCAPTURE_EXPR_22:%.*]] = alloca i32, align 4 +// CHECK20-NEXT: [[DOTCAPTURE_EXPR_23:%.*]] = alloca i32, align 4 +// CHECK20-NEXT: [[N_CASTED30:%.*]] = alloca i32, align 4 +// CHECK20-NEXT: [[DOTOFFLOAD_BASEPTRS31:%.*]] = alloca [3 x i8*], align 4 +// CHECK20-NEXT: [[DOTOFFLOAD_PTRS32:%.*]] = alloca [3 x i8*], align 4 +// CHECK20-NEXT: [[DOTOFFLOAD_MAPPERS33:%.*]] = alloca [3 x i8*], align 4 +// CHECK20-NEXT: [[_TMP34:%.*]] = alloca i32, align 4 +// CHECK20-NEXT: [[DOTCAPTURE_EXPR_35:%.*]] = alloca i32, align 4 +// CHECK20-NEXT: [[DOTCAPTURE_EXPR_36:%.*]] = alloca i32, align 4 +// CHECK20-NEXT: [[DOTCAPTURE_EXPR_43:%.*]] = alloca i32, align 4 +// CHECK20-NEXT: [[N_CASTED44:%.*]] = alloca i32, align 4 +// CHECK20-NEXT: [[DOTCAPTURE_EXPR__CASTED45:%.*]] = alloca i32, align 4 +// CHECK20-NEXT: [[DOTOFFLOAD_BASEPTRS46:%.*]] = alloca [4 x i8*], align 4 +// CHECK20-NEXT: [[DOTOFFLOAD_PTRS47:%.*]] = alloca [4 x i8*], align 4 +// CHECK20-NEXT: [[DOTOFFLOAD_MAPPERS48:%.*]] = alloca [4 x i8*], align 4 +// CHECK20-NEXT: [[_TMP49:%.*]] = alloca i32, align 4 +// CHECK20-NEXT: [[DOTCAPTURE_EXPR_50:%.*]] = alloca i32, align 4 +// CHECK20-NEXT: [[DOTCAPTURE_EXPR_51:%.*]] = alloca i32, align 4 // CHECK20-NEXT: store i32 0, i32* [[RETVAL]], align 4 // CHECK20-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 // CHECK20-NEXT: store i8** 
[[ARGV]], i8*** [[ARGV_ADDR]], align 4 @@ -24800,312 +24522,278 @@ // CHECK20-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK20-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i32* // CHECK20-NEXT: store i32 [[TMP3]], i32* [[TMP9]], align 4 -// CHECK20-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK20-NEXT: store i64 4, i64* [[TMP10]], align 4 -// CHECK20-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK20-NEXT: store i8* null, i8** [[TMP11]], align 4 -// CHECK20-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK20-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32* -// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP13]], align 4 -// CHECK20-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK20-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32* -// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP15]], align 4 -// CHECK20-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK20-NEXT: store i64 4, i64* [[TMP16]], align 4 -// CHECK20-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK20-NEXT: store i8* null, i8** [[TMP17]], align 4 -// CHECK20-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK20-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK20-NEXT: store i8* null, i8** [[TMP10]], align 4 +// CHECK20-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK20-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i32* +// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP12]], align 4 +// CHECK20-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK20-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i32* +// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP14]], align 4 +// CHECK20-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK20-NEXT: store i8* null, i8** [[TMP15]], align 4 +// CHECK20-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK20-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** +// CHECK20-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 4 +// CHECK20-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK20-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** // CHECK20-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 4 -// CHECK20-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK20-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** -// CHECK20-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 4 -// CHECK20-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK20-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 4 -// CHECK20-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK20-NEXT: store i8* null, i8** [[TMP23]], align 4 -// CHECK20-NEXT: [[TMP24:%.*]] = getelementptr inbounds 
[3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4 -// CHECK20-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK20-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK20-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 +// CHECK20-NEXT: store i64 [[TMP5]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 2), align 4 +// CHECK20-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK20-NEXT: store i8* null, i8** [[TMP20]], align 4 +// CHECK20-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP23:%.*]] = load i32, i32* [[N]], align 4 +// CHECK20-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK20-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK20-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK20-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK20-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK20-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK20-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK20-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1 -// CHECK20-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 -// CHECK20-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP30]]) -// CHECK20-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK20-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 -// CHECK20-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK20-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK20-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], 1 +// CHECK20-NEXT: [[TMP26:%.*]] = zext i32 [[ADD]] to i64 +// CHECK20-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP26]]) +// CHECK20-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK20-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK20-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK20: omp_offload.failed: // CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139(i32 [[TMP3]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK20-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK20: omp_offload.cont: -// CHECK20-NEXT: [[TMP33:%.*]] 
= load i32, i32* [[N]], align 4 -// CHECK20-NEXT: store i32 [[TMP33]], i32* [[N_CASTED3]], align 4 -// CHECK20-NEXT: [[TMP34:%.*]] = load i32, i32* [[N_CASTED3]], align 4 -// CHECK20-NEXT: [[TMP35:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK20-NEXT: [[TMP36:%.*]] = sext i32 [[TMP35]] to i64 -// CHECK20-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i32* -// CHECK20-NEXT: store i32 [[TMP34]], i32* [[TMP38]], align 4 -// CHECK20-NEXT: [[TMP39:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i32* -// CHECK20-NEXT: store i32 [[TMP34]], i32* [[TMP40]], align 4 -// CHECK20-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 0 -// CHECK20-NEXT: store i64 4, i64* [[TMP41]], align 4 -// CHECK20-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP29:%.*]] = load i32, i32* [[N]], align 4 +// CHECK20-NEXT: store i32 [[TMP29]], i32* [[N_CASTED3]], align 4 +// CHECK20-NEXT: [[TMP30:%.*]] = load i32, i32* [[N_CASTED3]], align 4 +// CHECK20-NEXT: [[TMP31:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK20-NEXT: [[TMP32:%.*]] = sext i32 [[TMP31]] to i64 +// CHECK20-NEXT: [[TMP33:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i32* +// CHECK20-NEXT: store i32 [[TMP30]], i32* [[TMP34]], align 4 +// CHECK20-NEXT: [[TMP35:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP36:%.*]] = bitcast i8** [[TMP35]] to i32* +// CHECK20-NEXT: store i32 [[TMP30]], i32* [[TMP36]], align 4 +// CHECK20-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0 +// CHECK20-NEXT: store i8* null, i8** [[TMP37]], align 4 +// CHECK20-NEXT: [[TMP38:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1 +// CHECK20-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i32* +// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP39]], align 4 +// CHECK20-NEXT: [[TMP40:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 1 +// CHECK20-NEXT: [[TMP41:%.*]] = bitcast i8** [[TMP40]] to i32* +// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP41]], align 4 +// CHECK20-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1 // CHECK20-NEXT: store i8* null, i8** [[TMP42]], align 4 -// CHECK20-NEXT: [[TMP43:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1 -// CHECK20-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32* -// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP44]], align 4 -// CHECK20-NEXT: [[TMP45:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 1 -// CHECK20-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32* -// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP46]], align 4 -// CHECK20-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 1 -// CHECK20-NEXT: store i64 4, i64* [[TMP47]], align 4 -// CHECK20-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1 -// CHECK20-NEXT: store i8* null, i8** [[TMP48]], align 4 -// 
CHECK20-NEXT: [[TMP49:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2 -// CHECK20-NEXT: [[TMP50:%.*]] = bitcast i8** [[TMP49]] to i32** -// CHECK20-NEXT: store i32* [[VLA]], i32** [[TMP50]], align 4 -// CHECK20-NEXT: [[TMP51:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 2 -// CHECK20-NEXT: [[TMP52:%.*]] = bitcast i8** [[TMP51]] to i32** -// CHECK20-NEXT: store i32* [[VLA]], i32** [[TMP52]], align 4 -// CHECK20-NEXT: [[TMP53:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 2 -// CHECK20-NEXT: store i64 [[TMP36]], i64* [[TMP53]], align 4 -// CHECK20-NEXT: [[TMP54:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 2 -// CHECK20-NEXT: store i8* null, i8** [[TMP54]], align 4 -// CHECK20-NEXT: [[TMP55:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP56:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP57:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP58:%.*]] = load i32, i32* [[N]], align 4 -// CHECK20-NEXT: store i32 [[TMP58]], i32* [[DOTCAPTURE_EXPR_9]], align 4 -// CHECK20-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 -// CHECK20-NEXT: [[SUB11:%.*]] = sub nsw i32 [[TMP59]], 0 -// CHECK20-NEXT: [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1 -// CHECK20-NEXT: [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1 -// CHECK20-NEXT: store i32 [[SUB13]], i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK20-NEXT: [[TMP60:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK20-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP60]], 1 -// CHECK20-NEXT: [[TMP61:%.*]] = zext i32 [[ADD14]] to i64 -// CHECK20-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP61]]) -// CHECK20-NEXT: [[TMP62:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143.region_id, i32 3, i8** [[TMP55]], i8** [[TMP56]], i64* [[TMP57]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK20-NEXT: [[TMP63:%.*]] = icmp ne i32 [[TMP62]], 0 -// CHECK20-NEXT: br i1 [[TMP63]], label [[OMP_OFFLOAD_FAILED15:%.*]], label [[OMP_OFFLOAD_CONT16:%.*]] -// CHECK20: omp_offload.failed15: -// CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143(i32 [[TMP34]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] -// CHECK20-NEXT: br label [[OMP_OFFLOAD_CONT16]] -// CHECK20: omp_offload.cont16: -// CHECK20-NEXT: [[TMP64:%.*]] = load i32, i32* [[M]], align 4 -// CHECK20-NEXT: store i32 [[TMP64]], i32* [[DOTCAPTURE_EXPR_17]], align 4 -// CHECK20-NEXT: [[TMP65:%.*]] = load i32, i32* [[N]], align 4 -// CHECK20-NEXT: store i32 [[TMP65]], i32* [[N_CASTED18]], align 4 -// CHECK20-NEXT: [[TMP66:%.*]] = load i32, i32* [[N_CASTED18]], align 4 -// CHECK20-NEXT: [[TMP67:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_17]], align 4 -// CHECK20-NEXT: store i32 [[TMP67]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK20-NEXT: [[TMP68:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK20-NEXT: [[TMP69:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK20-NEXT: [[TMP70:%.*]] = sext i32 [[TMP69]] to i64 -// CHECK20-NEXT: [[TMP71:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* 
[[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP72:%.*]] = bitcast i8** [[TMP71]] to i32* -// CHECK20-NEXT: store i32 [[TMP66]], i32* [[TMP72]], align 4 -// CHECK20-NEXT: [[TMP73:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP74:%.*]] = bitcast i8** [[TMP73]] to i32* -// CHECK20-NEXT: store i32 [[TMP66]], i32* [[TMP74]], align 4 -// CHECK20-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 0 -// CHECK20-NEXT: store i64 4, i64* [[TMP75]], align 4 -// CHECK20-NEXT: [[TMP76:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 0 -// CHECK20-NEXT: store i8* null, i8** [[TMP76]], align 4 -// CHECK20-NEXT: [[TMP77:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 1 -// CHECK20-NEXT: [[TMP78:%.*]] = bitcast i8** [[TMP77]] to i32* -// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP78]], align 4 -// CHECK20-NEXT: [[TMP79:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 1 -// CHECK20-NEXT: [[TMP80:%.*]] = bitcast i8** [[TMP79]] to i32* -// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP80]], align 4 -// CHECK20-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 1 -// CHECK20-NEXT: store i64 4, i64* [[TMP81]], align 4 -// CHECK20-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 1 +// CHECK20-NEXT: [[TMP43:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2 +// CHECK20-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32** +// CHECK20-NEXT: store i32* [[VLA]], i32** [[TMP44]], align 4 +// CHECK20-NEXT: [[TMP45:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 2 +// CHECK20-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32** +// CHECK20-NEXT: store i32* [[VLA]], i32** [[TMP46]], align 4 +// CHECK20-NEXT: store i64 [[TMP32]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 2), align 4 +// CHECK20-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 2 +// CHECK20-NEXT: store i8* null, i8** [[TMP47]], align 4 +// CHECK20-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP49:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP50:%.*]] = load i32, i32* [[N]], align 4 +// CHECK20-NEXT: store i32 [[TMP50]], i32* [[DOTCAPTURE_EXPR_8]], align 4 +// CHECK20-NEXT: [[TMP51:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_8]], align 4 +// CHECK20-NEXT: [[SUB10:%.*]] = sub nsw i32 [[TMP51]], 0 +// CHECK20-NEXT: [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1 +// CHECK20-NEXT: [[SUB12:%.*]] = sub nsw i32 [[DIV11]], 1 +// CHECK20-NEXT: store i32 [[SUB12]], i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK20-NEXT: [[TMP52:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK20-NEXT: [[ADD13:%.*]] = add nsw i32 [[TMP52]], 1 +// CHECK20-NEXT: [[TMP53:%.*]] = zext i32 [[ADD13]] to i64 +// CHECK20-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP53]]) +// CHECK20-NEXT: [[TMP54:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143.region_id, i32 3, i8** [[TMP48]], i8** 
[[TMP49]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK20-NEXT: [[TMP55:%.*]] = icmp ne i32 [[TMP54]], 0 +// CHECK20-NEXT: br i1 [[TMP55]], label [[OMP_OFFLOAD_FAILED14:%.*]], label [[OMP_OFFLOAD_CONT15:%.*]] +// CHECK20: omp_offload.failed14: +// CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143(i32 [[TMP30]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] +// CHECK20-NEXT: br label [[OMP_OFFLOAD_CONT15]] +// CHECK20: omp_offload.cont15: +// CHECK20-NEXT: [[TMP56:%.*]] = load i32, i32* [[M]], align 4 +// CHECK20-NEXT: store i32 [[TMP56]], i32* [[DOTCAPTURE_EXPR_16]], align 4 +// CHECK20-NEXT: [[TMP57:%.*]] = load i32, i32* [[N]], align 4 +// CHECK20-NEXT: store i32 [[TMP57]], i32* [[N_CASTED17]], align 4 +// CHECK20-NEXT: [[TMP58:%.*]] = load i32, i32* [[N_CASTED17]], align 4 +// CHECK20-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_16]], align 4 +// CHECK20-NEXT: store i32 [[TMP59]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 +// CHECK20-NEXT: [[TMP60:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 +// CHECK20-NEXT: [[TMP61:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK20-NEXT: [[TMP62:%.*]] = sext i32 [[TMP61]] to i64 +// CHECK20-NEXT: [[TMP63:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP64:%.*]] = bitcast i8** [[TMP63]] to i32* +// CHECK20-NEXT: store i32 [[TMP58]], i32* [[TMP64]], align 4 +// CHECK20-NEXT: [[TMP65:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP66:%.*]] = bitcast i8** [[TMP65]] to i32* +// CHECK20-NEXT: store i32 [[TMP58]], i32* [[TMP66]], align 4 +// CHECK20-NEXT: [[TMP67:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 0 +// CHECK20-NEXT: store i8* null, i8** [[TMP67]], align 4 +// CHECK20-NEXT: [[TMP68:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 1 +// CHECK20-NEXT: [[TMP69:%.*]] = bitcast i8** [[TMP68]] to i32* +// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP69]], align 4 +// CHECK20-NEXT: [[TMP70:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 1 +// CHECK20-NEXT: [[TMP71:%.*]] = bitcast i8** [[TMP70]] to i32* +// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP71]], align 4 +// CHECK20-NEXT: [[TMP72:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 1 +// CHECK20-NEXT: store i8* null, i8** [[TMP72]], align 4 +// CHECK20-NEXT: [[TMP73:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 2 +// CHECK20-NEXT: [[TMP74:%.*]] = bitcast i8** [[TMP73]] to i32** +// CHECK20-NEXT: store i32* [[VLA]], i32** [[TMP74]], align 4 +// CHECK20-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 2 +// CHECK20-NEXT: [[TMP76:%.*]] = bitcast i8** [[TMP75]] to i32** +// CHECK20-NEXT: store i32* [[VLA]], i32** [[TMP76]], align 4 +// CHECK20-NEXT: store i64 [[TMP62]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 2), align 4 +// CHECK20-NEXT: [[TMP77:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 2 +// CHECK20-NEXT: store i8* null, i8** [[TMP77]], align 4 +// CHECK20-NEXT: [[TMP78:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* 
[[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 3 +// CHECK20-NEXT: [[TMP79:%.*]] = bitcast i8** [[TMP78]] to i32* +// CHECK20-NEXT: store i32 [[TMP60]], i32* [[TMP79]], align 4 +// CHECK20-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 3 +// CHECK20-NEXT: [[TMP81:%.*]] = bitcast i8** [[TMP80]] to i32* +// CHECK20-NEXT: store i32 [[TMP60]], i32* [[TMP81]], align 4 +// CHECK20-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 3 // CHECK20-NEXT: store i8* null, i8** [[TMP82]], align 4 -// CHECK20-NEXT: [[TMP83:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 2 -// CHECK20-NEXT: [[TMP84:%.*]] = bitcast i8** [[TMP83]] to i32** -// CHECK20-NEXT: store i32* [[VLA]], i32** [[TMP84]], align 4 -// CHECK20-NEXT: [[TMP85:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 2 -// CHECK20-NEXT: [[TMP86:%.*]] = bitcast i8** [[TMP85]] to i32** -// CHECK20-NEXT: store i32* [[VLA]], i32** [[TMP86]], align 4 -// CHECK20-NEXT: [[TMP87:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 2 -// CHECK20-NEXT: store i64 [[TMP70]], i64* [[TMP87]], align 4 -// CHECK20-NEXT: [[TMP88:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 2 -// CHECK20-NEXT: store i8* null, i8** [[TMP88]], align 4 -// CHECK20-NEXT: [[TMP89:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 3 -// CHECK20-NEXT: [[TMP90:%.*]] = bitcast i8** [[TMP89]] to i32* -// CHECK20-NEXT: store i32 [[TMP68]], i32* [[TMP90]], align 4 -// CHECK20-NEXT: [[TMP91:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 3 -// CHECK20-NEXT: [[TMP92:%.*]] = bitcast i8** [[TMP91]] to i32* -// CHECK20-NEXT: store i32 [[TMP68]], i32* [[TMP92]], align 4 -// CHECK20-NEXT: [[TMP93:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 3 -// CHECK20-NEXT: store i64 4, i64* [[TMP93]], align 4 -// CHECK20-NEXT: [[TMP94:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 3 -// CHECK20-NEXT: store i8* null, i8** [[TMP94]], align 4 -// CHECK20-NEXT: [[TMP95:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP96:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP97:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP98:%.*]] = load i32, i32* [[N]], align 4 -// CHECK20-NEXT: store i32 [[TMP98]], i32* [[DOTCAPTURE_EXPR_24]], align 4 -// CHECK20-NEXT: [[TMP99:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_24]], align 4 -// CHECK20-NEXT: [[SUB26:%.*]] = sub nsw i32 [[TMP99]], 0 -// CHECK20-NEXT: [[DIV27:%.*]] = sdiv i32 [[SUB26]], 1 -// CHECK20-NEXT: [[SUB28:%.*]] = sub nsw i32 [[DIV27]], 1 -// CHECK20-NEXT: store i32 [[SUB28]], i32* [[DOTCAPTURE_EXPR_25]], align 4 -// CHECK20-NEXT: [[TMP100:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_25]], align 4 -// CHECK20-NEXT: [[ADD29:%.*]] = add nsw i32 [[TMP100]], 1 -// CHECK20-NEXT: [[TMP101:%.*]] = zext i32 [[ADD29]] to i64 -// CHECK20-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP101]]) -// CHECK20-NEXT: [[TMP102:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* 
@.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147.region_id, i32 4, i8** [[TMP95]], i8** [[TMP96]], i64* [[TMP97]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.7, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK20-NEXT: [[TMP103:%.*]] = icmp ne i32 [[TMP102]], 0 -// CHECK20-NEXT: br i1 [[TMP103]], label [[OMP_OFFLOAD_FAILED30:%.*]], label [[OMP_OFFLOAD_CONT31:%.*]] -// CHECK20: omp_offload.failed30: -// CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147(i32 [[TMP66]], i32 [[TMP0]], i32* [[VLA]], i32 [[TMP68]]) #[[ATTR3]] -// CHECK20-NEXT: br label [[OMP_OFFLOAD_CONT31]] -// CHECK20: omp_offload.cont31: -// CHECK20-NEXT: [[TMP104:%.*]] = load i32, i32* [[N]], align 4 -// CHECK20-NEXT: store i32 [[TMP104]], i32* [[N_CASTED32]], align 4 -// CHECK20-NEXT: [[TMP105:%.*]] = load i32, i32* [[N_CASTED32]], align 4 -// CHECK20-NEXT: [[TMP106:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK20-NEXT: [[TMP107:%.*]] = sext i32 [[TMP106]] to i64 -// CHECK20-NEXT: [[TMP108:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP109:%.*]] = bitcast i8** [[TMP108]] to i32* -// CHECK20-NEXT: store i32 [[TMP105]], i32* [[TMP109]], align 4 -// CHECK20-NEXT: [[TMP110:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP111:%.*]] = bitcast i8** [[TMP110]] to i32* -// CHECK20-NEXT: store i32 [[TMP105]], i32* [[TMP111]], align 4 -// CHECK20-NEXT: [[TMP112:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES36]], i32 0, i32 0 -// CHECK20-NEXT: store i64 4, i64* [[TMP112]], align 4 -// CHECK20-NEXT: [[TMP113:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS35]], i32 0, i32 0 -// CHECK20-NEXT: store i8* null, i8** [[TMP113]], align 4 -// CHECK20-NEXT: [[TMP114:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 1 -// CHECK20-NEXT: [[TMP115:%.*]] = bitcast i8** [[TMP114]] to i32* -// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP115]], align 4 -// CHECK20-NEXT: [[TMP116:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 1 -// CHECK20-NEXT: [[TMP117:%.*]] = bitcast i8** [[TMP116]] to i32* -// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP117]], align 4 -// CHECK20-NEXT: [[TMP118:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES36]], i32 0, i32 1 -// CHECK20-NEXT: store i64 4, i64* [[TMP118]], align 4 -// CHECK20-NEXT: [[TMP119:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS35]], i32 0, i32 1 -// CHECK20-NEXT: store i8* null, i8** [[TMP119]], align 4 -// CHECK20-NEXT: [[TMP120:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 2 -// CHECK20-NEXT: [[TMP121:%.*]] = bitcast i8** [[TMP120]] to i32** -// CHECK20-NEXT: store i32* [[VLA]], i32** [[TMP121]], align 4 -// CHECK20-NEXT: [[TMP122:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 2 -// CHECK20-NEXT: [[TMP123:%.*]] = bitcast i8** [[TMP122]] to i32** -// CHECK20-NEXT: store i32* [[VLA]], i32** [[TMP123]], align 4 -// CHECK20-NEXT: [[TMP124:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES36]], i32 0, i32 2 -// CHECK20-NEXT: store i64 [[TMP107]], i64* [[TMP124]], align 4 -// CHECK20-NEXT: [[TMP125:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS35]], i32 0, i32 2 -// CHECK20-NEXT: store i8* null, i8** [[TMP125]], 
align 4 -// CHECK20-NEXT: [[TMP126:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP127:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP128:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES36]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP129:%.*]] = load i32, i32* [[N]], align 4 -// CHECK20-NEXT: store i32 [[TMP129]], i32* [[DOTCAPTURE_EXPR_38]], align 4 -// CHECK20-NEXT: [[TMP130:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_38]], align 4 -// CHECK20-NEXT: [[SUB40:%.*]] = sub nsw i32 [[TMP130]], 0 -// CHECK20-NEXT: [[DIV41:%.*]] = sdiv i32 [[SUB40]], 1 -// CHECK20-NEXT: [[SUB42:%.*]] = sub nsw i32 [[DIV41]], 1 -// CHECK20-NEXT: store i32 [[SUB42]], i32* [[DOTCAPTURE_EXPR_39]], align 4 -// CHECK20-NEXT: [[TMP131:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_39]], align 4 -// CHECK20-NEXT: [[ADD43:%.*]] = add nsw i32 [[TMP131]], 1 -// CHECK20-NEXT: [[TMP132:%.*]] = zext i32 [[ADD43]] to i64 -// CHECK20-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP132]]) -// CHECK20-NEXT: [[TMP133:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151.region_id, i32 3, i8** [[TMP126]], i8** [[TMP127]], i64* [[TMP128]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK20-NEXT: [[TMP134:%.*]] = icmp ne i32 [[TMP133]], 0 -// CHECK20-NEXT: br i1 [[TMP134]], label [[OMP_OFFLOAD_FAILED44:%.*]], label [[OMP_OFFLOAD_CONT45:%.*]] -// CHECK20: omp_offload.failed44: -// CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151(i32 [[TMP105]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] -// CHECK20-NEXT: br label [[OMP_OFFLOAD_CONT45]] -// CHECK20: omp_offload.cont45: -// CHECK20-NEXT: [[TMP135:%.*]] = load i32, i32* [[M]], align 4 -// CHECK20-NEXT: store i32 [[TMP135]], i32* [[DOTCAPTURE_EXPR_46]], align 4 -// CHECK20-NEXT: [[TMP136:%.*]] = load i32, i32* [[N]], align 4 -// CHECK20-NEXT: store i32 [[TMP136]], i32* [[N_CASTED47]], align 4 -// CHECK20-NEXT: [[TMP137:%.*]] = load i32, i32* [[N_CASTED47]], align 4 -// CHECK20-NEXT: [[TMP138:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_46]], align 4 -// CHECK20-NEXT: store i32 [[TMP138]], i32* [[DOTCAPTURE_EXPR__CASTED48]], align 4 -// CHECK20-NEXT: [[TMP139:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED48]], align 4 -// CHECK20-NEXT: [[TMP140:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK20-NEXT: [[TMP141:%.*]] = sext i32 [[TMP140]] to i64 -// CHECK20-NEXT: [[TMP142:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS49]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP83:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP84:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP85:%.*]] = load i32, i32* [[N]], align 4 +// CHECK20-NEXT: store i32 [[TMP85]], i32* [[DOTCAPTURE_EXPR_22]], align 4 +// CHECK20-NEXT: [[TMP86:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_22]], align 4 +// CHECK20-NEXT: [[SUB24:%.*]] = sub nsw i32 [[TMP86]], 0 +// CHECK20-NEXT: [[DIV25:%.*]] = sdiv i32 [[SUB24]], 1 +// CHECK20-NEXT: [[SUB26:%.*]] = sub nsw i32 [[DIV25]], 1 +// CHECK20-NEXT: store i32 [[SUB26]], i32* [[DOTCAPTURE_EXPR_23]], align 4 +// CHECK20-NEXT: [[TMP87:%.*]] = load i32, i32* 
[[DOTCAPTURE_EXPR_23]], align 4 +// CHECK20-NEXT: [[ADD27:%.*]] = add nsw i32 [[TMP87]], 1 +// CHECK20-NEXT: [[TMP88:%.*]] = zext i32 [[ADD27]] to i64 +// CHECK20-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP88]]) +// CHECK20-NEXT: [[TMP89:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147.region_id, i32 4, i8** [[TMP83]], i8** [[TMP84]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK20-NEXT: [[TMP90:%.*]] = icmp ne i32 [[TMP89]], 0 +// CHECK20-NEXT: br i1 [[TMP90]], label [[OMP_OFFLOAD_FAILED28:%.*]], label [[OMP_OFFLOAD_CONT29:%.*]] +// CHECK20: omp_offload.failed28: +// CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147(i32 [[TMP58]], i32 [[TMP0]], i32* [[VLA]], i32 [[TMP60]]) #[[ATTR3]] +// CHECK20-NEXT: br label [[OMP_OFFLOAD_CONT29]] +// CHECK20: omp_offload.cont29: +// CHECK20-NEXT: [[TMP91:%.*]] = load i32, i32* [[N]], align 4 +// CHECK20-NEXT: store i32 [[TMP91]], i32* [[N_CASTED30]], align 4 +// CHECK20-NEXT: [[TMP92:%.*]] = load i32, i32* [[N_CASTED30]], align 4 +// CHECK20-NEXT: [[TMP93:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK20-NEXT: [[TMP94:%.*]] = sext i32 [[TMP93]] to i64 +// CHECK20-NEXT: [[TMP95:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS31]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP96:%.*]] = bitcast i8** [[TMP95]] to i32* +// CHECK20-NEXT: store i32 [[TMP92]], i32* [[TMP96]], align 4 +// CHECK20-NEXT: [[TMP97:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS32]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP98:%.*]] = bitcast i8** [[TMP97]] to i32* +// CHECK20-NEXT: store i32 [[TMP92]], i32* [[TMP98]], align 4 +// CHECK20-NEXT: [[TMP99:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS33]], i32 0, i32 0 +// CHECK20-NEXT: store i8* null, i8** [[TMP99]], align 4 +// CHECK20-NEXT: [[TMP100:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS31]], i32 0, i32 1 +// CHECK20-NEXT: [[TMP101:%.*]] = bitcast i8** [[TMP100]] to i32* +// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP101]], align 4 +// CHECK20-NEXT: [[TMP102:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS32]], i32 0, i32 1 +// CHECK20-NEXT: [[TMP103:%.*]] = bitcast i8** [[TMP102]] to i32* +// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP103]], align 4 +// CHECK20-NEXT: [[TMP104:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS33]], i32 0, i32 1 +// CHECK20-NEXT: store i8* null, i8** [[TMP104]], align 4 +// CHECK20-NEXT: [[TMP105:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS31]], i32 0, i32 2 +// CHECK20-NEXT: [[TMP106:%.*]] = bitcast i8** [[TMP105]] to i32** +// CHECK20-NEXT: store i32* [[VLA]], i32** [[TMP106]], align 4 +// CHECK20-NEXT: [[TMP107:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS32]], i32 0, i32 2 +// CHECK20-NEXT: [[TMP108:%.*]] = bitcast i8** [[TMP107]] to i32** +// CHECK20-NEXT: store i32* [[VLA]], i32** [[TMP108]], align 4 +// CHECK20-NEXT: store i64 [[TMP94]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.12, i32 0, i32 2), align 4 +// CHECK20-NEXT: [[TMP109:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS33]], i32 0, i32 2 +// CHECK20-NEXT: 
store i8* null, i8** [[TMP109]], align 4 +// CHECK20-NEXT: [[TMP110:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS31]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP111:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS32]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP112:%.*]] = load i32, i32* [[N]], align 4 +// CHECK20-NEXT: store i32 [[TMP112]], i32* [[DOTCAPTURE_EXPR_35]], align 4 +// CHECK20-NEXT: [[TMP113:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_35]], align 4 +// CHECK20-NEXT: [[SUB37:%.*]] = sub nsw i32 [[TMP113]], 0 +// CHECK20-NEXT: [[DIV38:%.*]] = sdiv i32 [[SUB37]], 1 +// CHECK20-NEXT: [[SUB39:%.*]] = sub nsw i32 [[DIV38]], 1 +// CHECK20-NEXT: store i32 [[SUB39]], i32* [[DOTCAPTURE_EXPR_36]], align 4 +// CHECK20-NEXT: [[TMP114:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_36]], align 4 +// CHECK20-NEXT: [[ADD40:%.*]] = add nsw i32 [[TMP114]], 1 +// CHECK20-NEXT: [[TMP115:%.*]] = zext i32 [[ADD40]] to i64 +// CHECK20-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP115]]) +// CHECK20-NEXT: [[TMP116:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151.region_id, i32 3, i8** [[TMP110]], i8** [[TMP111]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK20-NEXT: [[TMP117:%.*]] = icmp ne i32 [[TMP116]], 0 +// CHECK20-NEXT: br i1 [[TMP117]], label [[OMP_OFFLOAD_FAILED41:%.*]], label [[OMP_OFFLOAD_CONT42:%.*]] +// CHECK20: omp_offload.failed41: +// CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151(i32 [[TMP92]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] +// CHECK20-NEXT: br label [[OMP_OFFLOAD_CONT42]] +// CHECK20: omp_offload.cont42: +// CHECK20-NEXT: [[TMP118:%.*]] = load i32, i32* [[M]], align 4 +// CHECK20-NEXT: store i32 [[TMP118]], i32* [[DOTCAPTURE_EXPR_43]], align 4 +// CHECK20-NEXT: [[TMP119:%.*]] = load i32, i32* [[N]], align 4 +// CHECK20-NEXT: store i32 [[TMP119]], i32* [[N_CASTED44]], align 4 +// CHECK20-NEXT: [[TMP120:%.*]] = load i32, i32* [[N_CASTED44]], align 4 +// CHECK20-NEXT: [[TMP121:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_43]], align 4 +// CHECK20-NEXT: store i32 [[TMP121]], i32* [[DOTCAPTURE_EXPR__CASTED45]], align 4 +// CHECK20-NEXT: [[TMP122:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED45]], align 4 +// CHECK20-NEXT: [[TMP123:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK20-NEXT: [[TMP124:%.*]] = sext i32 [[TMP123]] to i64 +// CHECK20-NEXT: [[TMP125:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS46]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP126:%.*]] = bitcast i8** [[TMP125]] to i32* +// CHECK20-NEXT: store i32 [[TMP120]], i32* [[TMP126]], align 4 +// CHECK20-NEXT: [[TMP127:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS47]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP128:%.*]] = bitcast i8** [[TMP127]] to i32* +// CHECK20-NEXT: store i32 [[TMP120]], i32* [[TMP128]], align 4 +// CHECK20-NEXT: [[TMP129:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS48]], i32 0, i32 0 +// CHECK20-NEXT: store i8* null, i8** [[TMP129]], align 4 +// CHECK20-NEXT: [[TMP130:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS46]], i32 0, i32 1 +// CHECK20-NEXT: [[TMP131:%.*]] = bitcast i8** [[TMP130]] to i32* +// CHECK20-NEXT: store i32 [[TMP0]], i32* 
[[TMP131]], align 4 +// CHECK20-NEXT: [[TMP132:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS47]], i32 0, i32 1 +// CHECK20-NEXT: [[TMP133:%.*]] = bitcast i8** [[TMP132]] to i32* +// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP133]], align 4 +// CHECK20-NEXT: [[TMP134:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS48]], i32 0, i32 1 +// CHECK20-NEXT: store i8* null, i8** [[TMP134]], align 4 +// CHECK20-NEXT: [[TMP135:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS46]], i32 0, i32 2 +// CHECK20-NEXT: [[TMP136:%.*]] = bitcast i8** [[TMP135]] to i32** +// CHECK20-NEXT: store i32* [[VLA]], i32** [[TMP136]], align 4 +// CHECK20-NEXT: [[TMP137:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS47]], i32 0, i32 2 +// CHECK20-NEXT: [[TMP138:%.*]] = bitcast i8** [[TMP137]] to i32** +// CHECK20-NEXT: store i32* [[VLA]], i32** [[TMP138]], align 4 +// CHECK20-NEXT: store i64 [[TMP124]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.16, i32 0, i32 2), align 4 +// CHECK20-NEXT: [[TMP139:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS48]], i32 0, i32 2 +// CHECK20-NEXT: store i8* null, i8** [[TMP139]], align 4 +// CHECK20-NEXT: [[TMP140:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS46]], i32 0, i32 3 +// CHECK20-NEXT: [[TMP141:%.*]] = bitcast i8** [[TMP140]] to i32* +// CHECK20-NEXT: store i32 [[TMP122]], i32* [[TMP141]], align 4 +// CHECK20-NEXT: [[TMP142:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS47]], i32 0, i32 3 // CHECK20-NEXT: [[TMP143:%.*]] = bitcast i8** [[TMP142]] to i32* -// CHECK20-NEXT: store i32 [[TMP137]], i32* [[TMP143]], align 4 -// CHECK20-NEXT: [[TMP144:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS50]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP145:%.*]] = bitcast i8** [[TMP144]] to i32* -// CHECK20-NEXT: store i32 [[TMP137]], i32* [[TMP145]], align 4 -// CHECK20-NEXT: [[TMP146:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES52]], i32 0, i32 0 -// CHECK20-NEXT: store i64 4, i64* [[TMP146]], align 4 -// CHECK20-NEXT: [[TMP147:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS51]], i32 0, i32 0 -// CHECK20-NEXT: store i8* null, i8** [[TMP147]], align 4 -// CHECK20-NEXT: [[TMP148:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS49]], i32 0, i32 1 -// CHECK20-NEXT: [[TMP149:%.*]] = bitcast i8** [[TMP148]] to i32* -// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP149]], align 4 -// CHECK20-NEXT: [[TMP150:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS50]], i32 0, i32 1 -// CHECK20-NEXT: [[TMP151:%.*]] = bitcast i8** [[TMP150]] to i32* -// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP151]], align 4 -// CHECK20-NEXT: [[TMP152:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES52]], i32 0, i32 1 -// CHECK20-NEXT: store i64 4, i64* [[TMP152]], align 4 -// CHECK20-NEXT: [[TMP153:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS51]], i32 0, i32 1 -// CHECK20-NEXT: store i8* null, i8** [[TMP153]], align 4 -// CHECK20-NEXT: [[TMP154:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS49]], i32 0, i32 2 -// CHECK20-NEXT: [[TMP155:%.*]] = bitcast i8** [[TMP154]] to i32** -// CHECK20-NEXT: store i32* [[VLA]], i32** [[TMP155]], align 4 -// CHECK20-NEXT: [[TMP156:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* 
[[DOTOFFLOAD_PTRS50]], i32 0, i32 2 -// CHECK20-NEXT: [[TMP157:%.*]] = bitcast i8** [[TMP156]] to i32** -// CHECK20-NEXT: store i32* [[VLA]], i32** [[TMP157]], align 4 -// CHECK20-NEXT: [[TMP158:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES52]], i32 0, i32 2 -// CHECK20-NEXT: store i64 [[TMP141]], i64* [[TMP158]], align 4 -// CHECK20-NEXT: [[TMP159:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS51]], i32 0, i32 2 -// CHECK20-NEXT: store i8* null, i8** [[TMP159]], align 4 -// CHECK20-NEXT: [[TMP160:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS49]], i32 0, i32 3 -// CHECK20-NEXT: [[TMP161:%.*]] = bitcast i8** [[TMP160]] to i32* -// CHECK20-NEXT: store i32 [[TMP139]], i32* [[TMP161]], align 4 -// CHECK20-NEXT: [[TMP162:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS50]], i32 0, i32 3 -// CHECK20-NEXT: [[TMP163:%.*]] = bitcast i8** [[TMP162]] to i32* -// CHECK20-NEXT: store i32 [[TMP139]], i32* [[TMP163]], align 4 -// CHECK20-NEXT: [[TMP164:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES52]], i32 0, i32 3 -// CHECK20-NEXT: store i64 4, i64* [[TMP164]], align 4 -// CHECK20-NEXT: [[TMP165:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS51]], i32 0, i32 3 -// CHECK20-NEXT: store i8* null, i8** [[TMP165]], align 4 -// CHECK20-NEXT: [[TMP166:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS49]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP167:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS50]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP168:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES52]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP169:%.*]] = load i32, i32* [[N]], align 4 -// CHECK20-NEXT: store i32 [[TMP169]], i32* [[DOTCAPTURE_EXPR_54]], align 4 -// CHECK20-NEXT: [[TMP170:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_54]], align 4 -// CHECK20-NEXT: [[SUB56:%.*]] = sub nsw i32 [[TMP170]], 0 -// CHECK20-NEXT: [[DIV57:%.*]] = sdiv i32 [[SUB56]], 1 -// CHECK20-NEXT: [[SUB58:%.*]] = sub nsw i32 [[DIV57]], 1 -// CHECK20-NEXT: store i32 [[SUB58]], i32* [[DOTCAPTURE_EXPR_55]], align 4 -// CHECK20-NEXT: [[TMP171:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_55]], align 4 -// CHECK20-NEXT: [[ADD59:%.*]] = add nsw i32 [[TMP171]], 1 -// CHECK20-NEXT: [[TMP172:%.*]] = zext i32 [[ADD59]] to i64 -// CHECK20-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP172]]) -// CHECK20-NEXT: [[TMP173:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155.region_id, i32 4, i8** [[TMP166]], i8** [[TMP167]], i64* [[TMP168]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK20-NEXT: [[TMP174:%.*]] = icmp ne i32 [[TMP173]], 0 -// CHECK20-NEXT: br i1 [[TMP174]], label [[OMP_OFFLOAD_FAILED60:%.*]], label [[OMP_OFFLOAD_CONT61:%.*]] -// CHECK20: omp_offload.failed60: -// CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155(i32 [[TMP137]], i32 [[TMP0]], i32* [[VLA]], i32 [[TMP139]]) #[[ATTR3]] -// CHECK20-NEXT: br label [[OMP_OFFLOAD_CONT61]] -// CHECK20: omp_offload.cont61: -// CHECK20-NEXT: [[TMP175:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 -// CHECK20-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP175]]) +// CHECK20-NEXT: store i32 [[TMP122]], i32* [[TMP143]], align 4 
+// CHECK20-NEXT: [[TMP144:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS48]], i32 0, i32 3 +// CHECK20-NEXT: store i8* null, i8** [[TMP144]], align 4 +// CHECK20-NEXT: [[TMP145:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS46]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP146:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS47]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP147:%.*]] = load i32, i32* [[N]], align 4 +// CHECK20-NEXT: store i32 [[TMP147]], i32* [[DOTCAPTURE_EXPR_50]], align 4 +// CHECK20-NEXT: [[TMP148:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_50]], align 4 +// CHECK20-NEXT: [[SUB52:%.*]] = sub nsw i32 [[TMP148]], 0 +// CHECK20-NEXT: [[DIV53:%.*]] = sdiv i32 [[SUB52]], 1 +// CHECK20-NEXT: [[SUB54:%.*]] = sub nsw i32 [[DIV53]], 1 +// CHECK20-NEXT: store i32 [[SUB54]], i32* [[DOTCAPTURE_EXPR_51]], align 4 +// CHECK20-NEXT: [[TMP149:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_51]], align 4 +// CHECK20-NEXT: [[ADD55:%.*]] = add nsw i32 [[TMP149]], 1 +// CHECK20-NEXT: [[TMP150:%.*]] = zext i32 [[ADD55]] to i64 +// CHECK20-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP150]]) +// CHECK20-NEXT: [[TMP151:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155.region_id, i32 4, i8** [[TMP145]], i8** [[TMP146]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.16, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK20-NEXT: [[TMP152:%.*]] = icmp ne i32 [[TMP151]], 0 +// CHECK20-NEXT: br i1 [[TMP152]], label [[OMP_OFFLOAD_FAILED56:%.*]], label [[OMP_OFFLOAD_CONT57:%.*]] +// CHECK20: omp_offload.failed56: +// CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155(i32 [[TMP120]], i32 [[TMP0]], i32* [[VLA]], i32 [[TMP122]]) #[[ATTR3]] +// CHECK20-NEXT: br label [[OMP_OFFLOAD_CONT57]] +// CHECK20: omp_offload.cont57: +// CHECK20-NEXT: [[TMP153:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 +// CHECK20-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP153]]) // CHECK20-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK20-NEXT: [[TMP176:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK20-NEXT: call void @llvm.stackrestore(i8* [[TMP176]]) -// CHECK20-NEXT: [[TMP177:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK20-NEXT: ret i32 [[TMP177]] +// CHECK20-NEXT: [[TMP154:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK20-NEXT: call void @llvm.stackrestore(i8* [[TMP154]]) +// CHECK20-NEXT: [[TMP155:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK20-NEXT: ret i32 [[TMP155]] // // // CHECK20-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139 @@ -25553,11 +25241,11 @@ // CHECK20-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK20-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK20-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP5]]) +// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP5]]) // CHECK20-NEXT: ret void // // -// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..5 +// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..6 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK20-NEXT: entry: // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -25638,7 +25326,7 @@ // CHECK20-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK20-NEXT: store i32 [[TMP20]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK20-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*, i32)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32 [[TMP16]], i32 [[TMP17]], i32 [[TMP19]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP21]]) +// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*, i32)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i32 [[TMP16]], i32 [[TMP17]], i32 [[TMP19]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP21]]) // CHECK20-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK20: omp.inner.for.inc: // CHECK20-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -25680,7 +25368,7 @@ // CHECK20-NEXT: ret void // // -// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..6 +// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..7 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32 noundef [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK20-NEXT: entry: // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -25798,11 +25486,11 @@ // CHECK20-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4 // CHECK20-NEXT: store i32 [[TMP2]], i32* [[N_CASTED]], align 4 // CHECK20-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4 -// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*)* @.omp_outlined..8 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]]) +// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]]) // CHECK20-NEXT: ret void // // -// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..8 +// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..10 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] { // CHECK20-NEXT: entry: // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -25875,7 +25563,7 @@ // CHECK20-NEXT: [[TMP17:%.*]] = load i32, i32* [[N_ADDR]], align 4 // CHECK20-NEXT: store i32 [[TMP17]], i32* [[N_CASTED]], align 4 // CHECK20-NEXT: [[TMP18:%.*]] = load i32, i32* [[N_CASTED]], align 4 -// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), i32 [[TMP15]], i32 [[TMP16]], i32 [[TMP18]], i32 [[TMP0]], i32* [[TMP1]]) +// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP15]], i32 [[TMP16]], i32 [[TMP18]], i32 [[TMP0]], i32* [[TMP1]]) // CHECK20-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK20: omp.inner.for.inc: // CHECK20-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -25894,7 +25582,7 @@ // CHECK20-NEXT: ret void // // -// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..9 +// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..11 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32 noundef [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] { // CHECK20-NEXT: entry: // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -26012,11 +25700,11 @@ // CHECK20-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK20-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK20-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP5]]) +// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP5]]) // CHECK20-NEXT: ret void // // -// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..14 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK20-NEXT: entry: // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -26095,7 +25783,7 @@ // CHECK20-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK20-NEXT: store i32 [[TMP19]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK20-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*, i32)* @.omp_outlined..12 to void (i32*, i32*, ...)*), i32 [[TMP15]], i32 [[TMP16]], i32 [[TMP18]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP20]]) +// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*, i32)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i32 [[TMP15]], i32 [[TMP16]], i32 [[TMP18]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP20]]) // CHECK20-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK20: omp.inner.for.inc: // CHECK20-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -26114,7 +25802,7 @@ // CHECK20-NEXT: ret void // // -// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..12 +// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..15 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32 noundef [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK20-NEXT: entry: // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -26257,7 +25945,7 @@ // CHECK20-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK20-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK20-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK20-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l112.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK20-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l112.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.20, 
i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.21, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK20-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK20-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK20: omp_offload.failed: @@ -26275,7 +25963,7 @@ // CHECK20-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0 // CHECK20-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0 // CHECK20-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK20-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.19, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.20, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK20-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.24, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.25, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK20-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 // CHECK20-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]] // CHECK20: omp_offload.failed5: @@ -26306,7 +25994,7 @@ // CHECK20-NEXT: [[TMP31:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0 // CHECK20-NEXT: [[TMP32:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0 // CHECK20-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK20-NEXT: [[TMP33:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l120.region_id, i32 2, i8** [[TMP31]], i8** [[TMP32]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.23, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.24, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK20-NEXT: [[TMP33:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l120.region_id, i32 2, i8** [[TMP31]], i8** [[TMP32]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.28, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.29, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK20-NEXT: [[TMP34:%.*]] = icmp ne i32 [[TMP33]], 0 // CHECK20-NEXT: br i1 [[TMP34]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]] // CHECK20: omp_offload.failed11: @@ -26324,7 +26012,7 @@ // CHECK20-NEXT: [[TMP40:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 0 // CHECK20-NEXT: [[TMP41:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 0 // CHECK20-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 
-1, i64 10) -// CHECK20-NEXT: [[TMP42:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l124.region_id, i32 1, i8** [[TMP40]], i8** [[TMP41]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.27, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.28, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK20-NEXT: [[TMP42:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l124.region_id, i32 1, i8** [[TMP40]], i8** [[TMP41]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.32, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.33, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK20-NEXT: [[TMP43:%.*]] = icmp ne i32 [[TMP42]], 0 // CHECK20-NEXT: br i1 [[TMP43]], label [[OMP_OFFLOAD_FAILED17:%.*]], label [[OMP_OFFLOAD_CONT18:%.*]] // CHECK20: omp_offload.failed17: @@ -26355,7 +26043,7 @@ // CHECK20-NEXT: [[TMP57:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0 // CHECK20-NEXT: [[TMP58:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0 // CHECK20-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK20-NEXT: [[TMP59:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l128.region_id, i32 2, i8** [[TMP57]], i8** [[TMP58]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.31, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.32, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK20-NEXT: [[TMP59:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l128.region_id, i32 2, i8** [[TMP57]], i8** [[TMP58]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.36, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.37, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK20-NEXT: [[TMP60:%.*]] = icmp ne i32 [[TMP59]], 0 // CHECK20-NEXT: br i1 [[TMP60]], label [[OMP_OFFLOAD_FAILED25:%.*]], label [[OMP_OFFLOAD_CONT26:%.*]] // CHECK20: omp_offload.failed25: @@ -26371,11 +26059,11 @@ // CHECK20-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK20-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 // CHECK20-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 -// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK20-NEXT: ret void // // -// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..14 +// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..18 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK20-NEXT: entry: // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -26421,7 +26109,7 @@ // CHECK20: omp.inner.for.body: // CHECK20-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 // CHECK20-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 -// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]) +// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..19 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]) // CHECK20-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK20: omp.inner.for.inc: // CHECK20-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -26436,7 +26124,7 @@ // CHECK20-NEXT: ret void // // -// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..15 +// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..19 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK20-NEXT: entry: // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -26516,11 +26204,11 @@ // CHECK20-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK20-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 // CHECK20-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 -// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..17 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..22 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK20-NEXT: ret void // // -// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..17 +// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..22 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK20-NEXT: entry: // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -26566,7 +26254,7 @@ // CHECK20: omp.inner.for.body: // CHECK20-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 // CHECK20-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 -// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]) +// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..23 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]) // CHECK20-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK20: omp.inner.for.inc: // CHECK20-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -26581,7 +26269,7 @@ // CHECK20-NEXT: ret void // // -// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..18 +// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..23 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK20-NEXT: entry: // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -26667,11 +26355,11 @@ // CHECK20-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK20-NEXT: store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK20-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..21 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP2]]) +// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..26 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP2]]) // CHECK20-NEXT: ret void // // -// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..21 +// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..26 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK20-NEXT: entry: // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -26723,7 +26411,7 @@ // CHECK20-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK20-NEXT: store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK20-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..22 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]]) +// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..27 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]]) // CHECK20-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK20: omp.inner.for.inc: // CHECK20-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -26738,7 +26426,7 @@ // CHECK20-NEXT: ret void // // -// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..22 +// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..27 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK20-NEXT: entry: // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -26840,11 +26528,11 @@ // CHECK20-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK20-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 // CHECK20-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 -// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..25 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..30 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK20-NEXT: ret void // // -// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..25 +// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..30 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK20-NEXT: entry: // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -26890,7 +26578,7 @@ // CHECK20: omp.inner.for.body: // CHECK20-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 // CHECK20-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 -// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..26 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]) +// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..31 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]) // CHECK20-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK20: omp.inner.for.inc: // CHECK20-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -26905,7 +26593,7 @@ // CHECK20-NEXT: ret void // // -// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..26 +// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..31 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK20-NEXT: entry: // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -26989,11 +26677,11 @@ // CHECK20-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK20-NEXT: store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK20-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..29 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP2]]) +// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..34 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP2]]) // CHECK20-NEXT: ret void // // -// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..29 +// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..34 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK20-NEXT: entry: // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -27045,7 +26733,7 @@ // CHECK20-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK20-NEXT: store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK20-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..30 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]]) +// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..35 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]]) // CHECK20-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK20: omp.inner.for.inc: // CHECK20-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -27060,7 +26748,7 @@ // CHECK20-NEXT: ret void // // -// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..30 +// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..35 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK20-NEXT: entry: // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 diff --git a/clang/test/OpenMP/target_teams_distribute_parallel_for_simd_collapse_codegen.cpp b/clang/test/OpenMP/target_teams_distribute_parallel_for_simd_collapse_codegen.cpp --- a/clang/test/OpenMP/target_teams_distribute_parallel_for_simd_collapse_codegen.cpp +++ b/clang/test/OpenMP/target_teams_distribute_parallel_for_simd_collapse_codegen.cpp @@ -1323,7 +1323,6 @@ // CHECK9-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 8 // CHECK9-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 8 // CHECK9-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 8 -// CHECK9-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 8 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK9-NEXT: [[_TMP2:%.*]] = alloca i32, align 4 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 @@ -1360,74 +1359,64 @@ // CHECK9-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK9-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64* // CHECK9-NEXT: store i64 [[TMP7]], i64* [[TMP15]], align 8 -// CHECK9-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK9-NEXT: store i64 4, i64* [[TMP16]], align 8 -// 
CHECK9-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK9-NEXT: store i8* null, i8** [[TMP17]], align 8 -// CHECK9-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK9-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i64* -// CHECK9-NEXT: store i64 [[TMP9]], i64* [[TMP19]], align 8 -// CHECK9-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK9-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i64* -// CHECK9-NEXT: store i64 [[TMP9]], i64* [[TMP21]], align 8 -// CHECK9-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK9-NEXT: store i64 4, i64* [[TMP22]], align 8 -// CHECK9-NEXT: [[TMP23:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK9-NEXT: store i8* null, i8** [[TMP23]], align 8 -// CHECK9-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK9-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK9-NEXT: store i8* null, i8** [[TMP16]], align 8 +// CHECK9-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK9-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i64* +// CHECK9-NEXT: store i64 [[TMP9]], i64* [[TMP18]], align 8 +// CHECK9-NEXT: [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK9-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i64* +// CHECK9-NEXT: store i64 [[TMP9]], i64* [[TMP20]], align 8 +// CHECK9-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK9-NEXT: store i8* null, i8** [[TMP21]], align 8 +// CHECK9-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK9-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i64* +// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP23]], align 8 +// CHECK9-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK9-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i64* // CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP25]], align 8 -// CHECK9-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK9-NEXT: [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i64* -// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP27]], align 8 -// CHECK9-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK9-NEXT: store i64 8, i64* [[TMP28]], align 8 -// CHECK9-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK9-NEXT: store i8* null, i8** [[TMP29]], align 8 -// CHECK9-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK9-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i64* -// CHECK9-NEXT: store i64 [[TMP3]], i64* [[TMP31]], align 8 -// CHECK9-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK9-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i64* -// CHECK9-NEXT: store i64 [[TMP3]], i64* [[TMP33]], align 8 -// CHECK9-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], 
i32 0, i32 3 -// CHECK9-NEXT: store i64 8, i64* [[TMP34]], align 8 -// CHECK9-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 -// CHECK9-NEXT: store i8* null, i8** [[TMP35]], align 8 -// CHECK9-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK9-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i32** -// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP37]], align 8 -// CHECK9-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK9-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i32** -// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP39]], align 8 -// CHECK9-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK9-NEXT: store i64 [[TMP11]], i64* [[TMP40]], align 8 -// CHECK9-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 -// CHECK9-NEXT: store i8* null, i8** [[TMP41]], align 8 -// CHECK9-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP43:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP44:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP45:%.*]] = load i32, i32* [[N]], align 4 -// CHECK9-NEXT: store i32 [[TMP45]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK9-NEXT: [[TMP46:%.*]] = load i32, i32* [[M]], align 4 -// CHECK9-NEXT: store i32 [[TMP46]], i32* [[DOTCAPTURE_EXPR_3]], align 4 -// CHECK9-NEXT: [[TMP47:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP47]], 0 +// CHECK9-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK9-NEXT: store i8* null, i8** [[TMP26]], align 8 +// CHECK9-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK9-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i64* +// CHECK9-NEXT: store i64 [[TMP3]], i64* [[TMP28]], align 8 +// CHECK9-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK9-NEXT: [[TMP30:%.*]] = bitcast i8** [[TMP29]] to i64* +// CHECK9-NEXT: store i64 [[TMP3]], i64* [[TMP30]], align 8 +// CHECK9-NEXT: [[TMP31:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 +// CHECK9-NEXT: store i8* null, i8** [[TMP31]], align 8 +// CHECK9-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK9-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i32** +// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP33]], align 8 +// CHECK9-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK9-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i32** +// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP35]], align 8 +// CHECK9-NEXT: store i64 [[TMP11]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes, i32 0, i32 4), align 8 +// CHECK9-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 +// CHECK9-NEXT: store i8* null, i8** [[TMP36]], align 8 +// CHECK9-NEXT: [[TMP37:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK9-NEXT: 
[[TMP38:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP39:%.*]] = load i32, i32* [[N]], align 4 +// CHECK9-NEXT: store i32 [[TMP39]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK9-NEXT: [[TMP40:%.*]] = load i32, i32* [[M]], align 4 +// CHECK9-NEXT: store i32 [[TMP40]], i32* [[DOTCAPTURE_EXPR_3]], align 4 +// CHECK9-NEXT: [[TMP41:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP41]], 0 // CHECK9-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK9-NEXT: [[CONV5:%.*]] = sext i32 [[DIV]] to i64 -// CHECK9-NEXT: [[TMP48:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4 -// CHECK9-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP48]], 0 +// CHECK9-NEXT: [[TMP42:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4 +// CHECK9-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP42]], 0 // CHECK9-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1 // CHECK9-NEXT: [[CONV8:%.*]] = sext i32 [[DIV7]] to i64 // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i64 [[CONV5]], [[CONV8]] // CHECK9-NEXT: [[SUB9:%.*]] = sub nsw i64 [[MUL]], 1 // CHECK9-NEXT: store i64 [[SUB9]], i64* [[DOTCAPTURE_EXPR_4]], align 8 -// CHECK9-NEXT: [[TMP49:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_4]], align 8 -// CHECK9-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP49]], 1 +// CHECK9-NEXT: [[TMP43:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_4]], align 8 +// CHECK9-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP43]], 1 // CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[ADD]]) -// CHECK9-NEXT: [[TMP50:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l81.region_id, i32 5, i8** [[TMP42]], i8** [[TMP43]], i64* [[TMP44]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK9-NEXT: [[TMP51:%.*]] = icmp ne i32 [[TMP50]], 0 -// CHECK9-NEXT: br i1 [[TMP51]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK9-NEXT: [[TMP44:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l81.region_id, i32 5, i8** [[TMP37]], i8** [[TMP38]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK9-NEXT: [[TMP45:%.*]] = icmp ne i32 [[TMP44]], 0 +// CHECK9-NEXT: br i1 [[TMP45]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK9: omp_offload.failed: // CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l81(i64 [[TMP7]], i64 [[TMP9]], i64 [[TMP1]], i64 [[TMP3]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -1435,10 +1424,10 @@ // CHECK9-NEXT: [[TMP52:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 // CHECK9-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10ELi2EEiT_(i32 noundef signext [[TMP52]]) // CHECK9-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK9-NEXT: [[TMP53:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK9-NEXT: call void @llvm.stackrestore(i8* [[TMP53]]) -// CHECK9-NEXT: [[TMP54:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK9-NEXT: ret i32 [[TMP54]] +// CHECK9-NEXT: [[TMP47:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK9-NEXT: call void @llvm.stackrestore(i8* [[TMP47]]) +// CHECK9-NEXT: 
[[TMP48:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK9-NEXT: ret i32 [[TMP48]] // // // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l81 @@ -1809,7 +1798,7 @@ // CHECK9-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK9-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 20) -// CHECK9-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l68.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK9-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l68.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK9-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK9-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK9: omp_offload.failed: @@ -2024,7 +2013,6 @@ // CHECK10-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 8 // CHECK10-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 8 // CHECK10-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 8 -// CHECK10-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 8 // CHECK10-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK10-NEXT: [[_TMP2:%.*]] = alloca i32, align 4 // CHECK10-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 @@ -2061,74 +2049,64 @@ // CHECK10-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK10-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64* // CHECK10-NEXT: store i64 [[TMP7]], i64* [[TMP15]], align 8 -// CHECK10-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK10-NEXT: store i64 4, i64* [[TMP16]], align 8 -// CHECK10-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK10-NEXT: store i8* null, i8** [[TMP17]], align 8 -// CHECK10-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK10-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i64* -// CHECK10-NEXT: store i64 [[TMP9]], i64* [[TMP19]], align 8 -// CHECK10-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK10-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i64* -// CHECK10-NEXT: store i64 [[TMP9]], i64* [[TMP21]], align 8 -// CHECK10-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK10-NEXT: store i64 4, i64* [[TMP22]], align 8 -// CHECK10-NEXT: [[TMP23:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK10-NEXT: store i8* null, i8** [[TMP23]], align 8 -// CHECK10-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x 
i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK10-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK10-NEXT: store i8* null, i8** [[TMP16]], align 8 +// CHECK10-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK10-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i64* +// CHECK10-NEXT: store i64 [[TMP9]], i64* [[TMP18]], align 8 +// CHECK10-NEXT: [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK10-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i64* +// CHECK10-NEXT: store i64 [[TMP9]], i64* [[TMP20]], align 8 +// CHECK10-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK10-NEXT: store i8* null, i8** [[TMP21]], align 8 +// CHECK10-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK10-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i64* +// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP23]], align 8 +// CHECK10-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK10-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i64* // CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP25]], align 8 -// CHECK10-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK10-NEXT: [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i64* -// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP27]], align 8 -// CHECK10-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK10-NEXT: store i64 8, i64* [[TMP28]], align 8 -// CHECK10-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK10-NEXT: store i8* null, i8** [[TMP29]], align 8 -// CHECK10-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK10-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i64* -// CHECK10-NEXT: store i64 [[TMP3]], i64* [[TMP31]], align 8 -// CHECK10-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK10-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i64* -// CHECK10-NEXT: store i64 [[TMP3]], i64* [[TMP33]], align 8 -// CHECK10-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK10-NEXT: store i64 8, i64* [[TMP34]], align 8 -// CHECK10-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 -// CHECK10-NEXT: store i8* null, i8** [[TMP35]], align 8 -// CHECK10-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK10-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i32** -// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP37]], align 8 -// CHECK10-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK10-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i32** -// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP39]], align 8 -// CHECK10-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK10-NEXT: store i64 [[TMP11]], i64* [[TMP40]], align 8 -// CHECK10-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, 
i64 4 -// CHECK10-NEXT: store i8* null, i8** [[TMP41]], align 8 -// CHECK10-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP43:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP44:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP45:%.*]] = load i32, i32* [[N]], align 4 -// CHECK10-NEXT: store i32 [[TMP45]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK10-NEXT: [[TMP46:%.*]] = load i32, i32* [[M]], align 4 -// CHECK10-NEXT: store i32 [[TMP46]], i32* [[DOTCAPTURE_EXPR_3]], align 4 -// CHECK10-NEXT: [[TMP47:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK10-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP47]], 0 +// CHECK10-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK10-NEXT: store i8* null, i8** [[TMP26]], align 8 +// CHECK10-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK10-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i64* +// CHECK10-NEXT: store i64 [[TMP3]], i64* [[TMP28]], align 8 +// CHECK10-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK10-NEXT: [[TMP30:%.*]] = bitcast i8** [[TMP29]] to i64* +// CHECK10-NEXT: store i64 [[TMP3]], i64* [[TMP30]], align 8 +// CHECK10-NEXT: [[TMP31:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 +// CHECK10-NEXT: store i8* null, i8** [[TMP31]], align 8 +// CHECK10-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK10-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i32** +// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP33]], align 8 +// CHECK10-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK10-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i32** +// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP35]], align 8 +// CHECK10-NEXT: store i64 [[TMP11]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes, i32 0, i32 4), align 8 +// CHECK10-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 +// CHECK10-NEXT: store i8* null, i8** [[TMP36]], align 8 +// CHECK10-NEXT: [[TMP37:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP39:%.*]] = load i32, i32* [[N]], align 4 +// CHECK10-NEXT: store i32 [[TMP39]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK10-NEXT: [[TMP40:%.*]] = load i32, i32* [[M]], align 4 +// CHECK10-NEXT: store i32 [[TMP40]], i32* [[DOTCAPTURE_EXPR_3]], align 4 +// CHECK10-NEXT: [[TMP41:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK10-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP41]], 0 // CHECK10-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK10-NEXT: [[CONV5:%.*]] = sext i32 [[DIV]] to i64 -// CHECK10-NEXT: [[TMP48:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4 -// CHECK10-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP48]], 0 +// CHECK10-NEXT: [[TMP42:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4 +// CHECK10-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP42]], 0 // CHECK10-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1 // CHECK10-NEXT: 
[[CONV8:%.*]] = sext i32 [[DIV7]] to i64 // CHECK10-NEXT: [[MUL:%.*]] = mul nsw i64 [[CONV5]], [[CONV8]] // CHECK10-NEXT: [[SUB9:%.*]] = sub nsw i64 [[MUL]], 1 // CHECK10-NEXT: store i64 [[SUB9]], i64* [[DOTCAPTURE_EXPR_4]], align 8 -// CHECK10-NEXT: [[TMP49:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_4]], align 8 -// CHECK10-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP49]], 1 +// CHECK10-NEXT: [[TMP43:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_4]], align 8 +// CHECK10-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP43]], 1 // CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[ADD]]) -// CHECK10-NEXT: [[TMP50:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l81.region_id, i32 5, i8** [[TMP42]], i8** [[TMP43]], i64* [[TMP44]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK10-NEXT: [[TMP51:%.*]] = icmp ne i32 [[TMP50]], 0 -// CHECK10-NEXT: br i1 [[TMP51]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK10-NEXT: [[TMP44:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l81.region_id, i32 5, i8** [[TMP37]], i8** [[TMP38]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK10-NEXT: [[TMP45:%.*]] = icmp ne i32 [[TMP44]], 0 +// CHECK10-NEXT: br i1 [[TMP45]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK10: omp_offload.failed: // CHECK10-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l81(i64 [[TMP7]], i64 [[TMP9]], i64 [[TMP1]], i64 [[TMP3]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK10-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -2136,10 +2114,10 @@ // CHECK10-NEXT: [[TMP52:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 // CHECK10-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10ELi2EEiT_(i32 noundef signext [[TMP52]]) // CHECK10-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK10-NEXT: [[TMP53:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK10-NEXT: call void @llvm.stackrestore(i8* [[TMP53]]) -// CHECK10-NEXT: [[TMP54:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK10-NEXT: ret i32 [[TMP54]] +// CHECK10-NEXT: [[TMP47:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK10-NEXT: call void @llvm.stackrestore(i8* [[TMP47]]) +// CHECK10-NEXT: [[TMP48:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK10-NEXT: ret i32 [[TMP48]] // // // CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l81 @@ -2510,7 +2488,7 @@ // CHECK10-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK10-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 20) -// CHECK10-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l68.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.4, i32 0, 
i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK10-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l68.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK10-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK10-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK10: omp_offload.failed: @@ -2725,7 +2703,6 @@ // CHECK11-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 4 // CHECK11-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 4 // CHECK11-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 4 -// CHECK11-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 4 // CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK11-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 @@ -2759,74 +2736,64 @@ // CHECK11-NEXT: [[TMP13:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK11-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i32* // CHECK11-NEXT: store i32 [[TMP5]], i32* [[TMP14]], align 4 -// CHECK11-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK11-NEXT: store i64 4, i64* [[TMP15]], align 4 -// CHECK11-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK11-NEXT: store i8* null, i8** [[TMP16]], align 4 -// CHECK11-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK11-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32* -// CHECK11-NEXT: store i32 [[TMP7]], i32* [[TMP18]], align 4 -// CHECK11-NEXT: [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK11-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i32* -// CHECK11-NEXT: store i32 [[TMP7]], i32* [[TMP20]], align 4 -// CHECK11-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK11-NEXT: store i64 4, i64* [[TMP21]], align 4 -// CHECK11-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK11-NEXT: store i8* null, i8** [[TMP22]], align 4 -// CHECK11-NEXT: [[TMP23:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK11-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK11-NEXT: store i8* null, i8** [[TMP15]], align 4 +// CHECK11-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK11-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32* +// CHECK11-NEXT: store i32 [[TMP7]], i32* [[TMP17]], align 4 +// CHECK11-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK11-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32* +// CHECK11-NEXT: store i32 [[TMP7]], i32* [[TMP19]], align 4 +// CHECK11-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK11-NEXT: store i8* null, i8** [[TMP20]], align 4 +// CHECK11-NEXT: [[TMP21:%.*]] 
= getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK11-NEXT: [[TMP22:%.*]] = bitcast i8** [[TMP21]] to i32* +// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP22]], align 4 +// CHECK11-NEXT: [[TMP23:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK11-NEXT: [[TMP24:%.*]] = bitcast i8** [[TMP23]] to i32* // CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP24]], align 4 -// CHECK11-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK11-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i32* -// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP26]], align 4 -// CHECK11-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK11-NEXT: store i64 4, i64* [[TMP27]], align 4 -// CHECK11-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK11-NEXT: store i8* null, i8** [[TMP28]], align 4 -// CHECK11-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK11-NEXT: [[TMP30:%.*]] = bitcast i8** [[TMP29]] to i32* -// CHECK11-NEXT: store i32 [[TMP1]], i32* [[TMP30]], align 4 -// CHECK11-NEXT: [[TMP31:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK11-NEXT: [[TMP32:%.*]] = bitcast i8** [[TMP31]] to i32* -// CHECK11-NEXT: store i32 [[TMP1]], i32* [[TMP32]], align 4 -// CHECK11-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK11-NEXT: store i64 4, i64* [[TMP33]], align 4 -// CHECK11-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 -// CHECK11-NEXT: store i8* null, i8** [[TMP34]], align 4 -// CHECK11-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK11-NEXT: [[TMP36:%.*]] = bitcast i8** [[TMP35]] to i32** -// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP36]], align 4 -// CHECK11-NEXT: [[TMP37:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK11-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i32** -// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP38]], align 4 -// CHECK11-NEXT: [[TMP39:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK11-NEXT: store i64 [[TMP10]], i64* [[TMP39]], align 4 -// CHECK11-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 -// CHECK11-NEXT: store i8* null, i8** [[TMP40]], align 4 -// CHECK11-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP43:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP44:%.*]] = load i32, i32* [[N]], align 4 -// CHECK11-NEXT: store i32 [[TMP44]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK11-NEXT: [[TMP45:%.*]] = load i32, i32* [[M]], align 4 -// CHECK11-NEXT: store i32 [[TMP45]], i32* [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK11-NEXT: [[TMP46:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP46]], 0 +// CHECK11-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* 
[[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK11-NEXT: store i8* null, i8** [[TMP25]], align 4 +// CHECK11-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK11-NEXT: [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i32* +// CHECK11-NEXT: store i32 [[TMP1]], i32* [[TMP27]], align 4 +// CHECK11-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK11-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i32* +// CHECK11-NEXT: store i32 [[TMP1]], i32* [[TMP29]], align 4 +// CHECK11-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 +// CHECK11-NEXT: store i8* null, i8** [[TMP30]], align 4 +// CHECK11-NEXT: [[TMP31:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK11-NEXT: [[TMP32:%.*]] = bitcast i8** [[TMP31]] to i32** +// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP32]], align 4 +// CHECK11-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK11-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i32** +// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP34]], align 4 +// CHECK11-NEXT: store i64 [[TMP10]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes, i32 0, i32 4), align 4 +// CHECK11-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 +// CHECK11-NEXT: store i8* null, i8** [[TMP35]], align 4 +// CHECK11-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP37:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP38:%.*]] = load i32, i32* [[N]], align 4 +// CHECK11-NEXT: store i32 [[TMP38]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK11-NEXT: [[TMP39:%.*]] = load i32, i32* [[M]], align 4 +// CHECK11-NEXT: store i32 [[TMP39]], i32* [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK11-NEXT: [[TMP40:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP40]], 0 // CHECK11-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK11-NEXT: [[CONV:%.*]] = sext i32 [[DIV]] to i64 -// CHECK11-NEXT: [[TMP47:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK11-NEXT: [[SUB4:%.*]] = sub nsw i32 [[TMP47]], 0 +// CHECK11-NEXT: [[TMP41:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK11-NEXT: [[SUB4:%.*]] = sub nsw i32 [[TMP41]], 0 // CHECK11-NEXT: [[DIV5:%.*]] = sdiv i32 [[SUB4]], 1 // CHECK11-NEXT: [[CONV6:%.*]] = sext i32 [[DIV5]] to i64 // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i64 [[CONV]], [[CONV6]] // CHECK11-NEXT: [[SUB7:%.*]] = sub nsw i64 [[MUL]], 1 // CHECK11-NEXT: store i64 [[SUB7]], i64* [[DOTCAPTURE_EXPR_3]], align 8 -// CHECK11-NEXT: [[TMP48:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8 -// CHECK11-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP48]], 1 +// CHECK11-NEXT: [[TMP42:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8 +// CHECK11-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP42]], 1 // CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[ADD]]) -// CHECK11-NEXT: [[TMP49:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l81.region_id, i32 5, i8** [[TMP41]], i8** [[TMP42]], i64* [[TMP43]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* 
@.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK11-NEXT: [[TMP50:%.*]] = icmp ne i32 [[TMP49]], 0 -// CHECK11-NEXT: br i1 [[TMP50]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK11-NEXT: [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l81.region_id, i32 5, i8** [[TMP36]], i8** [[TMP37]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK11-NEXT: [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0 +// CHECK11-NEXT: br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK11: omp_offload.failed: // CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l81(i32 [[TMP5]], i32 [[TMP7]], i32 [[TMP0]], i32 [[TMP1]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -2834,10 +2801,10 @@ // CHECK11-NEXT: [[TMP51:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 // CHECK11-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10ELi2EEiT_(i32 noundef [[TMP51]]) // CHECK11-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK11-NEXT: [[TMP52:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK11-NEXT: call void @llvm.stackrestore(i8* [[TMP52]]) -// CHECK11-NEXT: [[TMP53:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK11-NEXT: ret i32 [[TMP53]] +// CHECK11-NEXT: [[TMP46:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK11-NEXT: call void @llvm.stackrestore(i8* [[TMP46]]) +// CHECK11-NEXT: [[TMP47:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK11-NEXT: ret i32 [[TMP47]] // // // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l81 @@ -3200,7 +3167,7 @@ // CHECK11-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK11-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 20) -// CHECK11-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l68.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK11-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l68.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK11-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK11-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK11: omp_offload.failed: @@ -3409,7 +3376,6 @@ // CHECK12-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 4 // CHECK12-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 4 // CHECK12-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 4 -// CHECK12-NEXT: 
[[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 4 // CHECK12-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK12-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 // CHECK12-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 @@ -3443,74 +3409,64 @@ // CHECK12-NEXT: [[TMP13:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK12-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i32* // CHECK12-NEXT: store i32 [[TMP5]], i32* [[TMP14]], align 4 -// CHECK12-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK12-NEXT: store i64 4, i64* [[TMP15]], align 4 -// CHECK12-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK12-NEXT: store i8* null, i8** [[TMP16]], align 4 -// CHECK12-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK12-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32* -// CHECK12-NEXT: store i32 [[TMP7]], i32* [[TMP18]], align 4 -// CHECK12-NEXT: [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK12-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i32* -// CHECK12-NEXT: store i32 [[TMP7]], i32* [[TMP20]], align 4 -// CHECK12-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK12-NEXT: store i64 4, i64* [[TMP21]], align 4 -// CHECK12-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK12-NEXT: store i8* null, i8** [[TMP22]], align 4 -// CHECK12-NEXT: [[TMP23:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK12-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK12-NEXT: store i8* null, i8** [[TMP15]], align 4 +// CHECK12-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK12-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32* +// CHECK12-NEXT: store i32 [[TMP7]], i32* [[TMP17]], align 4 +// CHECK12-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK12-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32* +// CHECK12-NEXT: store i32 [[TMP7]], i32* [[TMP19]], align 4 +// CHECK12-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK12-NEXT: store i8* null, i8** [[TMP20]], align 4 +// CHECK12-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK12-NEXT: [[TMP22:%.*]] = bitcast i8** [[TMP21]] to i32* +// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP22]], align 4 +// CHECK12-NEXT: [[TMP23:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK12-NEXT: [[TMP24:%.*]] = bitcast i8** [[TMP23]] to i32* // CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP24]], align 4 -// CHECK12-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK12-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i32* -// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP26]], align 4 -// CHECK12-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK12-NEXT: store i64 4, i64* [[TMP27]], align 4 -// CHECK12-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x 
i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK12-NEXT: store i8* null, i8** [[TMP28]], align 4 -// CHECK12-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK12-NEXT: [[TMP30:%.*]] = bitcast i8** [[TMP29]] to i32* -// CHECK12-NEXT: store i32 [[TMP1]], i32* [[TMP30]], align 4 -// CHECK12-NEXT: [[TMP31:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK12-NEXT: [[TMP32:%.*]] = bitcast i8** [[TMP31]] to i32* -// CHECK12-NEXT: store i32 [[TMP1]], i32* [[TMP32]], align 4 -// CHECK12-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK12-NEXT: store i64 4, i64* [[TMP33]], align 4 -// CHECK12-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 -// CHECK12-NEXT: store i8* null, i8** [[TMP34]], align 4 -// CHECK12-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK12-NEXT: [[TMP36:%.*]] = bitcast i8** [[TMP35]] to i32** -// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP36]], align 4 -// CHECK12-NEXT: [[TMP37:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK12-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i32** -// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP38]], align 4 -// CHECK12-NEXT: [[TMP39:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK12-NEXT: store i64 [[TMP10]], i64* [[TMP39]], align 4 -// CHECK12-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 -// CHECK12-NEXT: store i8* null, i8** [[TMP40]], align 4 -// CHECK12-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP43:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP44:%.*]] = load i32, i32* [[N]], align 4 -// CHECK12-NEXT: store i32 [[TMP44]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK12-NEXT: [[TMP45:%.*]] = load i32, i32* [[M]], align 4 -// CHECK12-NEXT: store i32 [[TMP45]], i32* [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK12-NEXT: [[TMP46:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK12-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP46]], 0 +// CHECK12-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK12-NEXT: store i8* null, i8** [[TMP25]], align 4 +// CHECK12-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK12-NEXT: [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i32* +// CHECK12-NEXT: store i32 [[TMP1]], i32* [[TMP27]], align 4 +// CHECK12-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK12-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i32* +// CHECK12-NEXT: store i32 [[TMP1]], i32* [[TMP29]], align 4 +// CHECK12-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 +// CHECK12-NEXT: store i8* null, i8** [[TMP30]], align 4 +// CHECK12-NEXT: [[TMP31:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK12-NEXT: [[TMP32:%.*]] = bitcast i8** [[TMP31]] to i32** +// 
CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP32]], align 4 +// CHECK12-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK12-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i32** +// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP34]], align 4 +// CHECK12-NEXT: store i64 [[TMP10]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes, i32 0, i32 4), align 4 +// CHECK12-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 +// CHECK12-NEXT: store i8* null, i8** [[TMP35]], align 4 +// CHECK12-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP37:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP38:%.*]] = load i32, i32* [[N]], align 4 +// CHECK12-NEXT: store i32 [[TMP38]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK12-NEXT: [[TMP39:%.*]] = load i32, i32* [[M]], align 4 +// CHECK12-NEXT: store i32 [[TMP39]], i32* [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK12-NEXT: [[TMP40:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK12-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP40]], 0 // CHECK12-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK12-NEXT: [[CONV:%.*]] = sext i32 [[DIV]] to i64 -// CHECK12-NEXT: [[TMP47:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK12-NEXT: [[SUB4:%.*]] = sub nsw i32 [[TMP47]], 0 +// CHECK12-NEXT: [[TMP41:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK12-NEXT: [[SUB4:%.*]] = sub nsw i32 [[TMP41]], 0 // CHECK12-NEXT: [[DIV5:%.*]] = sdiv i32 [[SUB4]], 1 // CHECK12-NEXT: [[CONV6:%.*]] = sext i32 [[DIV5]] to i64 // CHECK12-NEXT: [[MUL:%.*]] = mul nsw i64 [[CONV]], [[CONV6]] // CHECK12-NEXT: [[SUB7:%.*]] = sub nsw i64 [[MUL]], 1 // CHECK12-NEXT: store i64 [[SUB7]], i64* [[DOTCAPTURE_EXPR_3]], align 8 -// CHECK12-NEXT: [[TMP48:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8 -// CHECK12-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP48]], 1 +// CHECK12-NEXT: [[TMP42:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8 +// CHECK12-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP42]], 1 // CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[ADD]]) -// CHECK12-NEXT: [[TMP49:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l81.region_id, i32 5, i8** [[TMP41]], i8** [[TMP42]], i64* [[TMP43]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK12-NEXT: [[TMP50:%.*]] = icmp ne i32 [[TMP49]], 0 -// CHECK12-NEXT: br i1 [[TMP50]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK12-NEXT: [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l81.region_id, i32 5, i8** [[TMP36]], i8** [[TMP37]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK12-NEXT: [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0 +// CHECK12-NEXT: br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK12: omp_offload.failed: // CHECK12-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l81(i32 [[TMP5]], i32 [[TMP7]], 
i32 [[TMP0]], i32 [[TMP1]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK12-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -3518,10 +3474,10 @@ // CHECK12-NEXT: [[TMP51:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 // CHECK12-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10ELi2EEiT_(i32 noundef [[TMP51]]) // CHECK12-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK12-NEXT: [[TMP52:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK12-NEXT: call void @llvm.stackrestore(i8* [[TMP52]]) -// CHECK12-NEXT: [[TMP53:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK12-NEXT: ret i32 [[TMP53]] +// CHECK12-NEXT: [[TMP46:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK12-NEXT: call void @llvm.stackrestore(i8* [[TMP46]]) +// CHECK12-NEXT: [[TMP47:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK12-NEXT: ret i32 [[TMP47]] // // // CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l81 @@ -3884,7 +3840,7 @@ // CHECK12-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK12-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 20) -// CHECK12-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l68.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK12-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l68.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK12-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK12-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK12: omp_offload.failed: diff --git a/clang/test/OpenMP/target_teams_distribute_parallel_for_simd_dist_schedule_codegen.cpp b/clang/test/OpenMP/target_teams_distribute_parallel_for_simd_dist_schedule_codegen.cpp --- a/clang/test/OpenMP/target_teams_distribute_parallel_for_simd_dist_schedule_codegen.cpp +++ b/clang/test/OpenMP/target_teams_distribute_parallel_for_simd_dist_schedule_codegen.cpp @@ -3056,7 +3056,6 @@ // CHECK9-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK9-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK9-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8 -// CHECK9-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 8 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -3064,20 +3063,18 @@ // CHECK9-NEXT: [[DOTOFFLOAD_BASEPTRS5:%.*]] = alloca [3 x i8*], align 8 // CHECK9-NEXT: [[DOTOFFLOAD_PTRS6:%.*]] = alloca [3 x i8*], align 8 // CHECK9-NEXT: [[DOTOFFLOAD_MAPPERS7:%.*]] = alloca [3 x i8*], align 8 -// CHECK9-NEXT: [[DOTOFFLOAD_SIZES8:%.*]] = alloca [3 x i64], align 8 -// CHECK9-NEXT: [[_TMP9:%.*]] = alloca 
i32, align 4 +// CHECK9-NEXT: [[_TMP8:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTCAPTURE_EXPR_11:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTCAPTURE_EXPR_18:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[N_CASTED19:%.*]] = alloca i64, align 8 +// CHECK9-NEXT: [[DOTCAPTURE_EXPR_17:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[N_CASTED18:%.*]] = alloca i64, align 8 // CHECK9-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8 -// CHECK9-NEXT: [[DOTOFFLOAD_BASEPTRS22:%.*]] = alloca [4 x i8*], align 8 -// CHECK9-NEXT: [[DOTOFFLOAD_PTRS23:%.*]] = alloca [4 x i8*], align 8 -// CHECK9-NEXT: [[DOTOFFLOAD_MAPPERS24:%.*]] = alloca [4 x i8*], align 8 -// CHECK9-NEXT: [[DOTOFFLOAD_SIZES25:%.*]] = alloca [4 x i64], align 8 -// CHECK9-NEXT: [[_TMP26:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTCAPTURE_EXPR_27:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTCAPTURE_EXPR_28:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOFFLOAD_BASEPTRS21:%.*]] = alloca [4 x i8*], align 8 +// CHECK9-NEXT: [[DOTOFFLOAD_PTRS22:%.*]] = alloca [4 x i8*], align 8 +// CHECK9-NEXT: [[DOTOFFLOAD_MAPPERS23:%.*]] = alloca [4 x i8*], align 8 +// CHECK9-NEXT: [[_TMP24:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTCAPTURE_EXPR_25:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTCAPTURE_EXPR_26:%.*]] = alloca i32, align 4 // CHECK9-NEXT: store i32 0, i32* [[RETVAL]], align 4 // CHECK9-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 // CHECK9-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 8 @@ -3100,186 +3097,166 @@ // CHECK9-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK9-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i64* // CHECK9-NEXT: store i64 [[TMP4]], i64* [[TMP9]], align 8 -// CHECK9-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK9-NEXT: store i64 4, i64* [[TMP10]], align 8 -// CHECK9-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK9-NEXT: store i8* null, i8** [[TMP11]], align 8 -// CHECK9-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK9-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64* -// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP13]], align 8 -// CHECK9-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK9-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64* -// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP15]], align 8 -// CHECK9-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK9-NEXT: store i64 8, i64* [[TMP16]], align 8 -// CHECK9-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK9-NEXT: store i8* null, i8** [[TMP17]], align 8 -// CHECK9-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK9-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK9-NEXT: store i8* null, i8** [[TMP10]], align 8 +// CHECK9-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK9-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i64* +// CHECK9-NEXT: store i64 
[[TMP1]], i64* [[TMP12]], align 8 +// CHECK9-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK9-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i64* +// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP14]], align 8 +// CHECK9-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK9-NEXT: store i8* null, i8** [[TMP15]], align 8 +// CHECK9-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK9-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** +// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 8 +// CHECK9-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK9-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** // CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 8 -// CHECK9-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK9-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** -// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 8 -// CHECK9-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK9-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 8 -// CHECK9-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK9-NEXT: store i8* null, i8** [[TMP23]], align 8 -// CHECK9-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4 -// CHECK9-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK9-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 +// CHECK9-NEXT: store i64 [[TMP5]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 2), align 8 +// CHECK9-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK9-NEXT: store i8* null, i8** [[TMP20]], align 8 +// CHECK9-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP23:%.*]] = load i32, i32* [[N]], align 4 +// CHECK9-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK9-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK9-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK9-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK9-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK9-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1 -// CHECK9-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 -// CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP30]]) -// CHECK9-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* 
@.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l100.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK9-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 -// CHECK9-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK9-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], 1 +// CHECK9-NEXT: [[TMP26:%.*]] = zext i32 [[ADD]] to i64 +// CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP26]]) +// CHECK9-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l100.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK9-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK9-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK9: omp_offload.failed: // CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l100(i64 [[TMP4]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK9: omp_offload.cont: -// CHECK9-NEXT: [[TMP33:%.*]] = load i32, i32* [[N]], align 4 +// CHECK9-NEXT: [[TMP29:%.*]] = load i32, i32* [[N]], align 4 // CHECK9-NEXT: [[CONV4:%.*]] = bitcast i64* [[N_CASTED3]] to i32* -// CHECK9-NEXT: store i32 [[TMP33]], i32* [[CONV4]], align 4 -// CHECK9-NEXT: [[TMP34:%.*]] = load i64, i64* [[N_CASTED3]], align 8 -// CHECK9-NEXT: [[TMP35:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK9-NEXT: [[TMP36:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i64* -// CHECK9-NEXT: store i64 [[TMP34]], i64* [[TMP37]], align 8 -// CHECK9-NEXT: [[TMP38:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i64* -// CHECK9-NEXT: store i64 [[TMP34]], i64* [[TMP39]], align 8 -// CHECK9-NEXT: [[TMP40:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 0 -// CHECK9-NEXT: store i64 4, i64* [[TMP40]], align 8 -// CHECK9-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 0 +// CHECK9-NEXT: store i32 [[TMP29]], i32* [[CONV4]], align 4 +// CHECK9-NEXT: [[TMP30:%.*]] = load i64, i64* [[N_CASTED3]], align 8 +// CHECK9-NEXT: [[TMP31:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK9-NEXT: [[TMP32:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i64* +// CHECK9-NEXT: store i64 [[TMP30]], i64* [[TMP33]], align 8 +// CHECK9-NEXT: [[TMP34:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i64* +// CHECK9-NEXT: store i64 [[TMP30]], i64* [[TMP35]], align 8 +// CHECK9-NEXT: [[TMP36:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 0 +// CHECK9-NEXT: store i8* null, i8** [[TMP36]], align 8 +// CHECK9-NEXT: 
[[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1 +// CHECK9-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i64* +// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP38]], align 8 +// CHECK9-NEXT: [[TMP39:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 1 +// CHECK9-NEXT: [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i64* +// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP40]], align 8 +// CHECK9-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 1 // CHECK9-NEXT: store i8* null, i8** [[TMP41]], align 8 -// CHECK9-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1 -// CHECK9-NEXT: [[TMP43:%.*]] = bitcast i8** [[TMP42]] to i64* -// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP43]], align 8 -// CHECK9-NEXT: [[TMP44:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 1 -// CHECK9-NEXT: [[TMP45:%.*]] = bitcast i8** [[TMP44]] to i64* -// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP45]], align 8 -// CHECK9-NEXT: [[TMP46:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 1 -// CHECK9-NEXT: store i64 8, i64* [[TMP46]], align 8 -// CHECK9-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 1 -// CHECK9-NEXT: store i8* null, i8** [[TMP47]], align 8 -// CHECK9-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 2 -// CHECK9-NEXT: [[TMP49:%.*]] = bitcast i8** [[TMP48]] to i32** -// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP49]], align 8 -// CHECK9-NEXT: [[TMP50:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 2 -// CHECK9-NEXT: [[TMP51:%.*]] = bitcast i8** [[TMP50]] to i32** -// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP51]], align 8 -// CHECK9-NEXT: [[TMP52:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 2 -// CHECK9-NEXT: store i64 [[TMP35]], i64* [[TMP52]], align 8 -// CHECK9-NEXT: [[TMP53:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 2 -// CHECK9-NEXT: store i8* null, i8** [[TMP53]], align 8 -// CHECK9-NEXT: [[TMP54:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP55:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP56:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP57:%.*]] = load i32, i32* [[N]], align 4 -// CHECK9-NEXT: store i32 [[TMP57]], i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK9-NEXT: [[TMP58:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK9-NEXT: [[SUB12:%.*]] = sub nsw i32 [[TMP58]], 0 -// CHECK9-NEXT: [[DIV13:%.*]] = sdiv i32 [[SUB12]], 1 -// CHECK9-NEXT: [[SUB14:%.*]] = sub nsw i32 [[DIV13]], 1 -// CHECK9-NEXT: store i32 [[SUB14]], i32* [[DOTCAPTURE_EXPR_11]], align 4 -// CHECK9-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_11]], align 4 -// CHECK9-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP59]], 1 -// CHECK9-NEXT: [[TMP60:%.*]] = zext i32 [[ADD15]] to i64 -// CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP60]]) -// CHECK9-NEXT: [[TMP61:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* 
@.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l104.region_id, i32 3, i8** [[TMP54]], i8** [[TMP55]], i64* [[TMP56]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK9-NEXT: [[TMP62:%.*]] = icmp ne i32 [[TMP61]], 0 -// CHECK9-NEXT: br i1 [[TMP62]], label [[OMP_OFFLOAD_FAILED16:%.*]], label [[OMP_OFFLOAD_CONT17:%.*]] -// CHECK9: omp_offload.failed16: -// CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l104(i64 [[TMP34]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] -// CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT17]] -// CHECK9: omp_offload.cont17: -// CHECK9-NEXT: [[TMP63:%.*]] = load i32, i32* [[M]], align 4 -// CHECK9-NEXT: store i32 [[TMP63]], i32* [[DOTCAPTURE_EXPR_18]], align 4 -// CHECK9-NEXT: [[TMP64:%.*]] = load i32, i32* [[N]], align 4 -// CHECK9-NEXT: [[CONV20:%.*]] = bitcast i64* [[N_CASTED19]] to i32* -// CHECK9-NEXT: store i32 [[TMP64]], i32* [[CONV20]], align 4 -// CHECK9-NEXT: [[TMP65:%.*]] = load i64, i64* [[N_CASTED19]], align 8 -// CHECK9-NEXT: [[TMP66:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_18]], align 4 -// CHECK9-NEXT: [[CONV21:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* -// CHECK9-NEXT: store i32 [[TMP66]], i32* [[CONV21]], align 4 -// CHECK9-NEXT: [[TMP67:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK9-NEXT: [[TMP68:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK9-NEXT: [[TMP69:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP70:%.*]] = bitcast i8** [[TMP69]] to i64* -// CHECK9-NEXT: store i64 [[TMP65]], i64* [[TMP70]], align 8 -// CHECK9-NEXT: [[TMP71:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP72:%.*]] = bitcast i8** [[TMP71]] to i64* -// CHECK9-NEXT: store i64 [[TMP65]], i64* [[TMP72]], align 8 -// CHECK9-NEXT: [[TMP73:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 0 -// CHECK9-NEXT: store i64 4, i64* [[TMP73]], align 8 -// CHECK9-NEXT: [[TMP74:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 0 -// CHECK9-NEXT: store i8* null, i8** [[TMP74]], align 8 -// CHECK9-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 1 -// CHECK9-NEXT: [[TMP76:%.*]] = bitcast i8** [[TMP75]] to i64* -// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP76]], align 8 -// CHECK9-NEXT: [[TMP77:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 1 -// CHECK9-NEXT: [[TMP78:%.*]] = bitcast i8** [[TMP77]] to i64* -// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP78]], align 8 -// CHECK9-NEXT: [[TMP79:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 1 -// CHECK9-NEXT: store i64 8, i64* [[TMP79]], align 8 -// CHECK9-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 1 +// CHECK9-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 2 +// CHECK9-NEXT: [[TMP43:%.*]] = bitcast i8** [[TMP42]] to i32** +// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP43]], align 8 +// CHECK9-NEXT: [[TMP44:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 2 +// CHECK9-NEXT: [[TMP45:%.*]] = bitcast i8** [[TMP44]] to i32** +// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP45]], align 8 +// CHECK9-NEXT: store i64 [[TMP31]], i64* 
getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 2), align 8 +// CHECK9-NEXT: [[TMP46:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 2 +// CHECK9-NEXT: store i8* null, i8** [[TMP46]], align 8 +// CHECK9-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP49:%.*]] = load i32, i32* [[N]], align 4 +// CHECK9-NEXT: store i32 [[TMP49]], i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK9-NEXT: [[TMP50:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK9-NEXT: [[SUB11:%.*]] = sub nsw i32 [[TMP50]], 0 +// CHECK9-NEXT: [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1 +// CHECK9-NEXT: [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1 +// CHECK9-NEXT: store i32 [[SUB13]], i32* [[DOTCAPTURE_EXPR_10]], align 4 +// CHECK9-NEXT: [[TMP51:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 +// CHECK9-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP51]], 1 +// CHECK9-NEXT: [[TMP52:%.*]] = zext i32 [[ADD14]] to i64 +// CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP52]]) +// CHECK9-NEXT: [[TMP53:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l104.region_id, i32 3, i8** [[TMP47]], i8** [[TMP48]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK9-NEXT: [[TMP54:%.*]] = icmp ne i32 [[TMP53]], 0 +// CHECK9-NEXT: br i1 [[TMP54]], label [[OMP_OFFLOAD_FAILED15:%.*]], label [[OMP_OFFLOAD_CONT16:%.*]] +// CHECK9: omp_offload.failed15: +// CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l104(i64 [[TMP30]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] +// CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT16]] +// CHECK9: omp_offload.cont16: +// CHECK9-NEXT: [[TMP55:%.*]] = load i32, i32* [[M]], align 4 +// CHECK9-NEXT: store i32 [[TMP55]], i32* [[DOTCAPTURE_EXPR_17]], align 4 +// CHECK9-NEXT: [[TMP56:%.*]] = load i32, i32* [[N]], align 4 +// CHECK9-NEXT: [[CONV19:%.*]] = bitcast i64* [[N_CASTED18]] to i32* +// CHECK9-NEXT: store i32 [[TMP56]], i32* [[CONV19]], align 4 +// CHECK9-NEXT: [[TMP57:%.*]] = load i64, i64* [[N_CASTED18]], align 8 +// CHECK9-NEXT: [[TMP58:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_17]], align 4 +// CHECK9-NEXT: [[CONV20:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* +// CHECK9-NEXT: store i32 [[TMP58]], i32* [[CONV20]], align 4 +// CHECK9-NEXT: [[TMP59:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 +// CHECK9-NEXT: [[TMP60:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK9-NEXT: [[TMP61:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP62:%.*]] = bitcast i8** [[TMP61]] to i64* +// CHECK9-NEXT: store i64 [[TMP57]], i64* [[TMP62]], align 8 +// CHECK9-NEXT: [[TMP63:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP64:%.*]] = bitcast i8** [[TMP63]] to i64* +// CHECK9-NEXT: store i64 [[TMP57]], i64* [[TMP64]], align 8 +// CHECK9-NEXT: [[TMP65:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 0 +// CHECK9-NEXT: store i8* null, i8** [[TMP65]], align 8 +// CHECK9-NEXT: 
[[TMP66:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 1 +// CHECK9-NEXT: [[TMP67:%.*]] = bitcast i8** [[TMP66]] to i64* +// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP67]], align 8 +// CHECK9-NEXT: [[TMP68:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 1 +// CHECK9-NEXT: [[TMP69:%.*]] = bitcast i8** [[TMP68]] to i64* +// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP69]], align 8 +// CHECK9-NEXT: [[TMP70:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 1 +// CHECK9-NEXT: store i8* null, i8** [[TMP70]], align 8 +// CHECK9-NEXT: [[TMP71:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 2 +// CHECK9-NEXT: [[TMP72:%.*]] = bitcast i8** [[TMP71]] to i32** +// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP72]], align 8 +// CHECK9-NEXT: [[TMP73:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 2 +// CHECK9-NEXT: [[TMP74:%.*]] = bitcast i8** [[TMP73]] to i32** +// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP74]], align 8 +// CHECK9-NEXT: store i64 [[TMP60]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 2), align 8 +// CHECK9-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 2 +// CHECK9-NEXT: store i8* null, i8** [[TMP75]], align 8 +// CHECK9-NEXT: [[TMP76:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 3 +// CHECK9-NEXT: [[TMP77:%.*]] = bitcast i8** [[TMP76]] to i64* +// CHECK9-NEXT: store i64 [[TMP59]], i64* [[TMP77]], align 8 +// CHECK9-NEXT: [[TMP78:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 3 +// CHECK9-NEXT: [[TMP79:%.*]] = bitcast i8** [[TMP78]] to i64* +// CHECK9-NEXT: store i64 [[TMP59]], i64* [[TMP79]], align 8 +// CHECK9-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 3 // CHECK9-NEXT: store i8* null, i8** [[TMP80]], align 8 -// CHECK9-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 2 -// CHECK9-NEXT: [[TMP82:%.*]] = bitcast i8** [[TMP81]] to i32** -// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP82]], align 8 -// CHECK9-NEXT: [[TMP83:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 2 -// CHECK9-NEXT: [[TMP84:%.*]] = bitcast i8** [[TMP83]] to i32** -// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP84]], align 8 -// CHECK9-NEXT: [[TMP85:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 2 -// CHECK9-NEXT: store i64 [[TMP68]], i64* [[TMP85]], align 8 -// CHECK9-NEXT: [[TMP86:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 2 -// CHECK9-NEXT: store i8* null, i8** [[TMP86]], align 8 -// CHECK9-NEXT: [[TMP87:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 3 -// CHECK9-NEXT: [[TMP88:%.*]] = bitcast i8** [[TMP87]] to i64* -// CHECK9-NEXT: store i64 [[TMP67]], i64* [[TMP88]], align 8 -// CHECK9-NEXT: [[TMP89:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 3 -// CHECK9-NEXT: [[TMP90:%.*]] = bitcast i8** [[TMP89]] to i64* -// CHECK9-NEXT: store i64 [[TMP67]], i64* [[TMP90]], align 8 -// CHECK9-NEXT: [[TMP91:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 3 -// CHECK9-NEXT: store i64 4, 
i64* [[TMP91]], align 8 -// CHECK9-NEXT: [[TMP92:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 3 -// CHECK9-NEXT: store i8* null, i8** [[TMP92]], align 8 -// CHECK9-NEXT: [[TMP93:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP94:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP95:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP96:%.*]] = load i32, i32* [[N]], align 4 -// CHECK9-NEXT: store i32 [[TMP96]], i32* [[DOTCAPTURE_EXPR_27]], align 4 -// CHECK9-NEXT: [[TMP97:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_27]], align 4 -// CHECK9-NEXT: [[SUB29:%.*]] = sub nsw i32 [[TMP97]], 0 -// CHECK9-NEXT: [[DIV30:%.*]] = sdiv i32 [[SUB29]], 1 -// CHECK9-NEXT: [[SUB31:%.*]] = sub nsw i32 [[DIV30]], 1 -// CHECK9-NEXT: store i32 [[SUB31]], i32* [[DOTCAPTURE_EXPR_28]], align 4 -// CHECK9-NEXT: [[TMP98:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_28]], align 4 -// CHECK9-NEXT: [[ADD32:%.*]] = add nsw i32 [[TMP98]], 1 -// CHECK9-NEXT: [[TMP99:%.*]] = zext i32 [[ADD32]] to i64 -// CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP99]]) -// CHECK9-NEXT: [[TMP100:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l108.region_id, i32 4, i8** [[TMP93]], i8** [[TMP94]], i64* [[TMP95]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.7, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK9-NEXT: [[TMP101:%.*]] = icmp ne i32 [[TMP100]], 0 -// CHECK9-NEXT: br i1 [[TMP101]], label [[OMP_OFFLOAD_FAILED33:%.*]], label [[OMP_OFFLOAD_CONT34:%.*]] -// CHECK9: omp_offload.failed33: -// CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l108(i64 [[TMP65]], i64 [[TMP1]], i32* [[VLA]], i64 [[TMP67]]) #[[ATTR3]] -// CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT34]] -// CHECK9: omp_offload.cont34: -// CHECK9-NEXT: [[TMP102:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 -// CHECK9-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP102]]) +// CHECK9-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP83:%.*]] = load i32, i32* [[N]], align 4 +// CHECK9-NEXT: store i32 [[TMP83]], i32* [[DOTCAPTURE_EXPR_25]], align 4 +// CHECK9-NEXT: [[TMP84:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_25]], align 4 +// CHECK9-NEXT: [[SUB27:%.*]] = sub nsw i32 [[TMP84]], 0 +// CHECK9-NEXT: [[DIV28:%.*]] = sdiv i32 [[SUB27]], 1 +// CHECK9-NEXT: [[SUB29:%.*]] = sub nsw i32 [[DIV28]], 1 +// CHECK9-NEXT: store i32 [[SUB29]], i32* [[DOTCAPTURE_EXPR_26]], align 4 +// CHECK9-NEXT: [[TMP85:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_26]], align 4 +// CHECK9-NEXT: [[ADD30:%.*]] = add nsw i32 [[TMP85]], 1 +// CHECK9-NEXT: [[TMP86:%.*]] = zext i32 [[ADD30]] to i64 +// CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP86]]) +// CHECK9-NEXT: [[TMP87:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l108.region_id, i32 4, i8** [[TMP81]], i8** [[TMP82]], i64* getelementptr inbounds ([4 x i64], [4 x 
i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK9-NEXT: [[TMP88:%.*]] = icmp ne i32 [[TMP87]], 0 +// CHECK9-NEXT: br i1 [[TMP88]], label [[OMP_OFFLOAD_FAILED31:%.*]], label [[OMP_OFFLOAD_CONT32:%.*]] +// CHECK9: omp_offload.failed31: +// CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l108(i64 [[TMP57]], i64 [[TMP1]], i32* [[VLA]], i64 [[TMP59]]) #[[ATTR3]] +// CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT32]] +// CHECK9: omp_offload.cont32: +// CHECK9-NEXT: [[TMP89:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 +// CHECK9-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP89]]) // CHECK9-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK9-NEXT: [[TMP103:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK9-NEXT: call void @llvm.stackrestore(i8* [[TMP103]]) -// CHECK9-NEXT: [[TMP104:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK9-NEXT: ret i32 [[TMP104]] +// CHECK9-NEXT: [[TMP90:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK9-NEXT: call void @llvm.stackrestore(i8* [[TMP90]]) +// CHECK9-NEXT: [[TMP91:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK9-NEXT: ret i32 [[TMP91]] // // // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l100 @@ -3799,11 +3776,11 @@ // CHECK9-NEXT: [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK9-NEXT: store i32 [[TMP4]], i32* [[CONV3]], align 4 // CHECK9-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP5]]) +// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP5]]) // CHECK9-NEXT: ret void // // -// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..5 +// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..6 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK9-NEXT: entry: // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -3890,7 +3867,7 @@ // CHECK9-NEXT: [[CONV9:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK9-NEXT: store i32 [[TMP22]], i32* [[CONV9]], align 4, !llvm.access.group !24 // CHECK9-NEXT: [[TMP23:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !24 -// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*, i64)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i64 [[TMP17]], i64 [[TMP19]], i64 [[TMP21]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP23]]), !llvm.access.group !24 +// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*, i64)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i64 [[TMP17]], i64 [[TMP19]], i64 [[TMP21]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP23]]), !llvm.access.group !24 // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK9: omp.inner.for.inc: // CHECK9-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24 @@ -3944,7 +3921,7 @@ // CHECK9-NEXT: ret void // // -// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..6 +// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..7 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK9-NEXT: entry: // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -4097,7 +4074,7 @@ // CHECK9-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK9-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK9-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l81.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK9-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l81.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK9-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK9-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK9: omp_offload.failed: @@ -4115,7 +4092,7 @@ // CHECK9-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0 // CHECK9-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0 // CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK9-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l85.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.13, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.14, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK9-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l85.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.16, 
i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK9-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 // CHECK9-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]] // CHECK9: omp_offload.failed5: @@ -4147,7 +4124,7 @@ // CHECK9-NEXT: [[TMP31:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0 // CHECK9-NEXT: [[TMP32:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0 // CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK9-NEXT: [[TMP33:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l89.region_id, i32 2, i8** [[TMP31]], i8** [[TMP32]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.17, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.18, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK9-NEXT: [[TMP33:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l89.region_id, i32 2, i8** [[TMP31]], i8** [[TMP32]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.20, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.21, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK9-NEXT: [[TMP34:%.*]] = icmp ne i32 [[TMP33]], 0 // CHECK9-NEXT: br i1 [[TMP34]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]] // CHECK9: omp_offload.failed11: @@ -4163,11 +4140,11 @@ // CHECK9-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK9-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 // CHECK9-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 -// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..8 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK9-NEXT: ret void // // -// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..8 +// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..10 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK9-NEXT: entry: // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -4215,7 +4192,7 @@ // CHECK9-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 // CHECK9-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !30 // CHECK9-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]), !llvm.access.group !30 +// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]), !llvm.access.group !30 // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK9: omp.inner.for.inc: // CHECK9-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !30 @@ -4237,7 +4214,7 @@ // CHECK9-NEXT: ret void // // -// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..9 +// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..11 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK9-NEXT: entry: // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -4327,11 +4304,11 @@ // CHECK9-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK9-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 // CHECK9-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 -// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK9-NEXT: ret void // // -// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..14 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK9-NEXT: entry: // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -4379,7 +4356,7 @@ // CHECK9-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 // CHECK9-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !36 // CHECK9-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..12 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]), !llvm.access.group !36 +// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]), !llvm.access.group !36 // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK9: omp.inner.for.inc: // CHECK9-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36 @@ -4401,7 +4378,7 @@ // CHECK9-NEXT: ret void // // -// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..12 +// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..15 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK9-NEXT: entry: // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -4499,11 +4476,11 @@ // CHECK9-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK9-NEXT: store i32 [[TMP1]], i32* [[CONV1]], align 4 // CHECK9-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..15 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP2]]) +// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..18 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP2]]) // CHECK9-NEXT: ret void // // -// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..15 +// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..18 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK9-NEXT: entry: // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -4559,7 +4536,7 @@ // CHECK9-NEXT: [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK9-NEXT: store i32 [[TMP12]], i32* [[CONV2]], align 4, !llvm.access.group !42 // CHECK9-NEXT: [[TMP13:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !42 -// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]]), !llvm.access.group !42 +// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..19 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]]), !llvm.access.group !42 // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK9: omp.inner.for.inc: // CHECK9-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !42 @@ -4602,7 +4579,7 @@ // CHECK9-NEXT: ret void // // -// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..16 +// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..19 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK9-NEXT: entry: // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -4710,7 +4687,6 @@ // CHECK10-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK10-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK10-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8 -// CHECK10-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 8 // CHECK10-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK10-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK10-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -4718,20 +4694,18 @@ // CHECK10-NEXT: [[DOTOFFLOAD_BASEPTRS5:%.*]] = alloca [3 x i8*], align 8 // CHECK10-NEXT: [[DOTOFFLOAD_PTRS6:%.*]] = alloca [3 x i8*], align 8 // CHECK10-NEXT: [[DOTOFFLOAD_MAPPERS7:%.*]] = alloca [3 x i8*], align 8 -// CHECK10-NEXT: [[DOTOFFLOAD_SIZES8:%.*]] = alloca [3 x i64], align 8 -// CHECK10-NEXT: [[_TMP9:%.*]] = alloca i32, align 4 +// CHECK10-NEXT: [[_TMP8:%.*]] = alloca i32, align 4 +// CHECK10-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4 // CHECK10-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4 -// CHECK10-NEXT: [[DOTCAPTURE_EXPR_11:%.*]] = alloca i32, align 4 -// CHECK10-NEXT: [[DOTCAPTURE_EXPR_18:%.*]] = alloca i32, align 4 -// CHECK10-NEXT: [[N_CASTED19:%.*]] = alloca i64, align 8 +// CHECK10-NEXT: [[DOTCAPTURE_EXPR_17:%.*]] = alloca i32, align 4 +// CHECK10-NEXT: [[N_CASTED18:%.*]] = alloca i64, align 8 // CHECK10-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8 -// CHECK10-NEXT: [[DOTOFFLOAD_BASEPTRS22:%.*]] = alloca [4 x i8*], align 8 -// CHECK10-NEXT: [[DOTOFFLOAD_PTRS23:%.*]] = alloca [4 x i8*], align 8 -// CHECK10-NEXT: [[DOTOFFLOAD_MAPPERS24:%.*]] = alloca [4 x i8*], align 8 -// CHECK10-NEXT: [[DOTOFFLOAD_SIZES25:%.*]] = alloca [4 x i64], align 8 -// CHECK10-NEXT: [[_TMP26:%.*]] = alloca i32, align 4 -// CHECK10-NEXT: [[DOTCAPTURE_EXPR_27:%.*]] = alloca i32, align 4 -// CHECK10-NEXT: [[DOTCAPTURE_EXPR_28:%.*]] = alloca i32, align 4 +// CHECK10-NEXT: [[DOTOFFLOAD_BASEPTRS21:%.*]] = alloca [4 x i8*], align 8 +// CHECK10-NEXT: [[DOTOFFLOAD_PTRS22:%.*]] = alloca [4 x i8*], align 8 +// CHECK10-NEXT: [[DOTOFFLOAD_MAPPERS23:%.*]] = alloca [4 x i8*], align 8 +// CHECK10-NEXT: [[_TMP24:%.*]] = alloca i32, align 4 +// CHECK10-NEXT: [[DOTCAPTURE_EXPR_25:%.*]] = alloca i32, align 4 +// CHECK10-NEXT: [[DOTCAPTURE_EXPR_26:%.*]] = alloca i32, align 4 // CHECK10-NEXT: store i32 0, i32* [[RETVAL]], align 4 // CHECK10-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 // CHECK10-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 8 @@ -4754,186 +4728,166 @@ // CHECK10-NEXT: 
[[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK10-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i64* // CHECK10-NEXT: store i64 [[TMP4]], i64* [[TMP9]], align 8 -// CHECK10-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK10-NEXT: store i64 4, i64* [[TMP10]], align 8 -// CHECK10-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK10-NEXT: store i8* null, i8** [[TMP11]], align 8 -// CHECK10-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK10-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64* -// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP13]], align 8 -// CHECK10-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK10-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64* -// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP15]], align 8 -// CHECK10-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK10-NEXT: store i64 8, i64* [[TMP16]], align 8 -// CHECK10-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK10-NEXT: store i8* null, i8** [[TMP17]], align 8 -// CHECK10-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK10-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK10-NEXT: store i8* null, i8** [[TMP10]], align 8 +// CHECK10-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK10-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i64* +// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP12]], align 8 +// CHECK10-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK10-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i64* +// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP14]], align 8 +// CHECK10-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK10-NEXT: store i8* null, i8** [[TMP15]], align 8 +// CHECK10-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK10-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** +// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 8 +// CHECK10-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK10-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** // CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 8 -// CHECK10-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK10-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** -// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 8 -// CHECK10-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK10-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 8 -// CHECK10-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK10-NEXT: store i8* null, i8** [[TMP23]], align 8 -// CHECK10-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK10-NEXT: 
[[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4 -// CHECK10-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK10-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK10-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 +// CHECK10-NEXT: store i64 [[TMP5]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 2), align 8 +// CHECK10-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK10-NEXT: store i8* null, i8** [[TMP20]], align 8 +// CHECK10-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP23:%.*]] = load i32, i32* [[N]], align 4 +// CHECK10-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK10-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK10-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK10-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK10-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK10-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK10-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK10-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1 -// CHECK10-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 -// CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP30]]) -// CHECK10-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l100.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK10-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 -// CHECK10-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK10-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK10-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], 1 +// CHECK10-NEXT: [[TMP26:%.*]] = zext i32 [[ADD]] to i64 +// CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP26]]) +// CHECK10-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l100.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK10-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK10-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK10: omp_offload.failed: // CHECK10-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l100(i64 [[TMP4]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK10-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK10: omp_offload.cont: -// CHECK10-NEXT: [[TMP33:%.*]] = load i32, i32* [[N]], align 4 +// CHECK10-NEXT: [[TMP29:%.*]] = load i32, 
i32* [[N]], align 4 // CHECK10-NEXT: [[CONV4:%.*]] = bitcast i64* [[N_CASTED3]] to i32* -// CHECK10-NEXT: store i32 [[TMP33]], i32* [[CONV4]], align 4 -// CHECK10-NEXT: [[TMP34:%.*]] = load i64, i64* [[N_CASTED3]], align 8 -// CHECK10-NEXT: [[TMP35:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK10-NEXT: [[TMP36:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i64* -// CHECK10-NEXT: store i64 [[TMP34]], i64* [[TMP37]], align 8 -// CHECK10-NEXT: [[TMP38:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i64* -// CHECK10-NEXT: store i64 [[TMP34]], i64* [[TMP39]], align 8 -// CHECK10-NEXT: [[TMP40:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 0 -// CHECK10-NEXT: store i64 4, i64* [[TMP40]], align 8 -// CHECK10-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 0 +// CHECK10-NEXT: store i32 [[TMP29]], i32* [[CONV4]], align 4 +// CHECK10-NEXT: [[TMP30:%.*]] = load i64, i64* [[N_CASTED3]], align 8 +// CHECK10-NEXT: [[TMP31:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK10-NEXT: [[TMP32:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i64* +// CHECK10-NEXT: store i64 [[TMP30]], i64* [[TMP33]], align 8 +// CHECK10-NEXT: [[TMP34:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i64* +// CHECK10-NEXT: store i64 [[TMP30]], i64* [[TMP35]], align 8 +// CHECK10-NEXT: [[TMP36:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 0 +// CHECK10-NEXT: store i8* null, i8** [[TMP36]], align 8 +// CHECK10-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1 +// CHECK10-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i64* +// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP38]], align 8 +// CHECK10-NEXT: [[TMP39:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 1 +// CHECK10-NEXT: [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i64* +// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP40]], align 8 +// CHECK10-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 1 // CHECK10-NEXT: store i8* null, i8** [[TMP41]], align 8 -// CHECK10-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1 -// CHECK10-NEXT: [[TMP43:%.*]] = bitcast i8** [[TMP42]] to i64* -// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP43]], align 8 -// CHECK10-NEXT: [[TMP44:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 1 -// CHECK10-NEXT: [[TMP45:%.*]] = bitcast i8** [[TMP44]] to i64* -// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP45]], align 8 -// CHECK10-NEXT: [[TMP46:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 1 -// CHECK10-NEXT: store i64 8, i64* [[TMP46]], align 8 -// CHECK10-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 1 -// CHECK10-NEXT: store i8* null, i8** [[TMP47]], align 8 -// CHECK10-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 2 -// CHECK10-NEXT: [[TMP49:%.*]] = 
bitcast i8** [[TMP48]] to i32** -// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP49]], align 8 -// CHECK10-NEXT: [[TMP50:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 2 -// CHECK10-NEXT: [[TMP51:%.*]] = bitcast i8** [[TMP50]] to i32** -// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP51]], align 8 -// CHECK10-NEXT: [[TMP52:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 2 -// CHECK10-NEXT: store i64 [[TMP35]], i64* [[TMP52]], align 8 -// CHECK10-NEXT: [[TMP53:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 2 -// CHECK10-NEXT: store i8* null, i8** [[TMP53]], align 8 -// CHECK10-NEXT: [[TMP54:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP55:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP56:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP57:%.*]] = load i32, i32* [[N]], align 4 -// CHECK10-NEXT: store i32 [[TMP57]], i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK10-NEXT: [[TMP58:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK10-NEXT: [[SUB12:%.*]] = sub nsw i32 [[TMP58]], 0 -// CHECK10-NEXT: [[DIV13:%.*]] = sdiv i32 [[SUB12]], 1 -// CHECK10-NEXT: [[SUB14:%.*]] = sub nsw i32 [[DIV13]], 1 -// CHECK10-NEXT: store i32 [[SUB14]], i32* [[DOTCAPTURE_EXPR_11]], align 4 -// CHECK10-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_11]], align 4 -// CHECK10-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP59]], 1 -// CHECK10-NEXT: [[TMP60:%.*]] = zext i32 [[ADD15]] to i64 -// CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP60]]) -// CHECK10-NEXT: [[TMP61:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l104.region_id, i32 3, i8** [[TMP54]], i8** [[TMP55]], i64* [[TMP56]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK10-NEXT: [[TMP62:%.*]] = icmp ne i32 [[TMP61]], 0 -// CHECK10-NEXT: br i1 [[TMP62]], label [[OMP_OFFLOAD_FAILED16:%.*]], label [[OMP_OFFLOAD_CONT17:%.*]] -// CHECK10: omp_offload.failed16: -// CHECK10-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l104(i64 [[TMP34]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] -// CHECK10-NEXT: br label [[OMP_OFFLOAD_CONT17]] -// CHECK10: omp_offload.cont17: -// CHECK10-NEXT: [[TMP63:%.*]] = load i32, i32* [[M]], align 4 -// CHECK10-NEXT: store i32 [[TMP63]], i32* [[DOTCAPTURE_EXPR_18]], align 4 -// CHECK10-NEXT: [[TMP64:%.*]] = load i32, i32* [[N]], align 4 -// CHECK10-NEXT: [[CONV20:%.*]] = bitcast i64* [[N_CASTED19]] to i32* -// CHECK10-NEXT: store i32 [[TMP64]], i32* [[CONV20]], align 4 -// CHECK10-NEXT: [[TMP65:%.*]] = load i64, i64* [[N_CASTED19]], align 8 -// CHECK10-NEXT: [[TMP66:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_18]], align 4 -// CHECK10-NEXT: [[CONV21:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* -// CHECK10-NEXT: store i32 [[TMP66]], i32* [[CONV21]], align 4 -// CHECK10-NEXT: [[TMP67:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK10-NEXT: [[TMP68:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK10-NEXT: [[TMP69:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP70:%.*]] = bitcast i8** 
[[TMP69]] to i64* -// CHECK10-NEXT: store i64 [[TMP65]], i64* [[TMP70]], align 8 -// CHECK10-NEXT: [[TMP71:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP72:%.*]] = bitcast i8** [[TMP71]] to i64* -// CHECK10-NEXT: store i64 [[TMP65]], i64* [[TMP72]], align 8 -// CHECK10-NEXT: [[TMP73:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 0 -// CHECK10-NEXT: store i64 4, i64* [[TMP73]], align 8 -// CHECK10-NEXT: [[TMP74:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 0 -// CHECK10-NEXT: store i8* null, i8** [[TMP74]], align 8 -// CHECK10-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 1 -// CHECK10-NEXT: [[TMP76:%.*]] = bitcast i8** [[TMP75]] to i64* -// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP76]], align 8 -// CHECK10-NEXT: [[TMP77:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 1 -// CHECK10-NEXT: [[TMP78:%.*]] = bitcast i8** [[TMP77]] to i64* -// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP78]], align 8 -// CHECK10-NEXT: [[TMP79:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 1 -// CHECK10-NEXT: store i64 8, i64* [[TMP79]], align 8 -// CHECK10-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 1 +// CHECK10-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 2 +// CHECK10-NEXT: [[TMP43:%.*]] = bitcast i8** [[TMP42]] to i32** +// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP43]], align 8 +// CHECK10-NEXT: [[TMP44:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 2 +// CHECK10-NEXT: [[TMP45:%.*]] = bitcast i8** [[TMP44]] to i32** +// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP45]], align 8 +// CHECK10-NEXT: store i64 [[TMP31]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 2), align 8 +// CHECK10-NEXT: [[TMP46:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 2 +// CHECK10-NEXT: store i8* null, i8** [[TMP46]], align 8 +// CHECK10-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP49:%.*]] = load i32, i32* [[N]], align 4 +// CHECK10-NEXT: store i32 [[TMP49]], i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK10-NEXT: [[TMP50:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK10-NEXT: [[SUB11:%.*]] = sub nsw i32 [[TMP50]], 0 +// CHECK10-NEXT: [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1 +// CHECK10-NEXT: [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1 +// CHECK10-NEXT: store i32 [[SUB13]], i32* [[DOTCAPTURE_EXPR_10]], align 4 +// CHECK10-NEXT: [[TMP51:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 +// CHECK10-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP51]], 1 +// CHECK10-NEXT: [[TMP52:%.*]] = zext i32 [[ADD14]] to i64 +// CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP52]]) +// CHECK10-NEXT: [[TMP53:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l104.region_id, i32 3, i8** [[TMP47]], i8** [[TMP48]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, 
i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK10-NEXT: [[TMP54:%.*]] = icmp ne i32 [[TMP53]], 0 +// CHECK10-NEXT: br i1 [[TMP54]], label [[OMP_OFFLOAD_FAILED15:%.*]], label [[OMP_OFFLOAD_CONT16:%.*]] +// CHECK10: omp_offload.failed15: +// CHECK10-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l104(i64 [[TMP30]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] +// CHECK10-NEXT: br label [[OMP_OFFLOAD_CONT16]] +// CHECK10: omp_offload.cont16: +// CHECK10-NEXT: [[TMP55:%.*]] = load i32, i32* [[M]], align 4 +// CHECK10-NEXT: store i32 [[TMP55]], i32* [[DOTCAPTURE_EXPR_17]], align 4 +// CHECK10-NEXT: [[TMP56:%.*]] = load i32, i32* [[N]], align 4 +// CHECK10-NEXT: [[CONV19:%.*]] = bitcast i64* [[N_CASTED18]] to i32* +// CHECK10-NEXT: store i32 [[TMP56]], i32* [[CONV19]], align 4 +// CHECK10-NEXT: [[TMP57:%.*]] = load i64, i64* [[N_CASTED18]], align 8 +// CHECK10-NEXT: [[TMP58:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_17]], align 4 +// CHECK10-NEXT: [[CONV20:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* +// CHECK10-NEXT: store i32 [[TMP58]], i32* [[CONV20]], align 4 +// CHECK10-NEXT: [[TMP59:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 +// CHECK10-NEXT: [[TMP60:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK10-NEXT: [[TMP61:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP62:%.*]] = bitcast i8** [[TMP61]] to i64* +// CHECK10-NEXT: store i64 [[TMP57]], i64* [[TMP62]], align 8 +// CHECK10-NEXT: [[TMP63:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP64:%.*]] = bitcast i8** [[TMP63]] to i64* +// CHECK10-NEXT: store i64 [[TMP57]], i64* [[TMP64]], align 8 +// CHECK10-NEXT: [[TMP65:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 0 +// CHECK10-NEXT: store i8* null, i8** [[TMP65]], align 8 +// CHECK10-NEXT: [[TMP66:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 1 +// CHECK10-NEXT: [[TMP67:%.*]] = bitcast i8** [[TMP66]] to i64* +// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP67]], align 8 +// CHECK10-NEXT: [[TMP68:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 1 +// CHECK10-NEXT: [[TMP69:%.*]] = bitcast i8** [[TMP68]] to i64* +// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP69]], align 8 +// CHECK10-NEXT: [[TMP70:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 1 +// CHECK10-NEXT: store i8* null, i8** [[TMP70]], align 8 +// CHECK10-NEXT: [[TMP71:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 2 +// CHECK10-NEXT: [[TMP72:%.*]] = bitcast i8** [[TMP71]] to i32** +// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP72]], align 8 +// CHECK10-NEXT: [[TMP73:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 2 +// CHECK10-NEXT: [[TMP74:%.*]] = bitcast i8** [[TMP73]] to i32** +// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP74]], align 8 +// CHECK10-NEXT: store i64 [[TMP60]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 2), align 8 +// CHECK10-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 2 +// CHECK10-NEXT: store i8* null, i8** [[TMP75]], align 8 +// CHECK10-NEXT: [[TMP76:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* 
[[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 3 +// CHECK10-NEXT: [[TMP77:%.*]] = bitcast i8** [[TMP76]] to i64* +// CHECK10-NEXT: store i64 [[TMP59]], i64* [[TMP77]], align 8 +// CHECK10-NEXT: [[TMP78:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 3 +// CHECK10-NEXT: [[TMP79:%.*]] = bitcast i8** [[TMP78]] to i64* +// CHECK10-NEXT: store i64 [[TMP59]], i64* [[TMP79]], align 8 +// CHECK10-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 3 // CHECK10-NEXT: store i8* null, i8** [[TMP80]], align 8 -// CHECK10-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 2 -// CHECK10-NEXT: [[TMP82:%.*]] = bitcast i8** [[TMP81]] to i32** -// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP82]], align 8 -// CHECK10-NEXT: [[TMP83:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 2 -// CHECK10-NEXT: [[TMP84:%.*]] = bitcast i8** [[TMP83]] to i32** -// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP84]], align 8 -// CHECK10-NEXT: [[TMP85:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 2 -// CHECK10-NEXT: store i64 [[TMP68]], i64* [[TMP85]], align 8 -// CHECK10-NEXT: [[TMP86:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 2 -// CHECK10-NEXT: store i8* null, i8** [[TMP86]], align 8 -// CHECK10-NEXT: [[TMP87:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 3 -// CHECK10-NEXT: [[TMP88:%.*]] = bitcast i8** [[TMP87]] to i64* -// CHECK10-NEXT: store i64 [[TMP67]], i64* [[TMP88]], align 8 -// CHECK10-NEXT: [[TMP89:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 3 -// CHECK10-NEXT: [[TMP90:%.*]] = bitcast i8** [[TMP89]] to i64* -// CHECK10-NEXT: store i64 [[TMP67]], i64* [[TMP90]], align 8 -// CHECK10-NEXT: [[TMP91:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 3 -// CHECK10-NEXT: store i64 4, i64* [[TMP91]], align 8 -// CHECK10-NEXT: [[TMP92:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 3 -// CHECK10-NEXT: store i8* null, i8** [[TMP92]], align 8 -// CHECK10-NEXT: [[TMP93:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP94:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP95:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP96:%.*]] = load i32, i32* [[N]], align 4 -// CHECK10-NEXT: store i32 [[TMP96]], i32* [[DOTCAPTURE_EXPR_27]], align 4 -// CHECK10-NEXT: [[TMP97:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_27]], align 4 -// CHECK10-NEXT: [[SUB29:%.*]] = sub nsw i32 [[TMP97]], 0 -// CHECK10-NEXT: [[DIV30:%.*]] = sdiv i32 [[SUB29]], 1 -// CHECK10-NEXT: [[SUB31:%.*]] = sub nsw i32 [[DIV30]], 1 -// CHECK10-NEXT: store i32 [[SUB31]], i32* [[DOTCAPTURE_EXPR_28]], align 4 -// CHECK10-NEXT: [[TMP98:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_28]], align 4 -// CHECK10-NEXT: [[ADD32:%.*]] = add nsw i32 [[TMP98]], 1 -// CHECK10-NEXT: [[TMP99:%.*]] = zext i32 [[ADD32]] to i64 -// CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP99]]) -// CHECK10-NEXT: [[TMP100:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* 
@.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l108.region_id, i32 4, i8** [[TMP93]], i8** [[TMP94]], i64* [[TMP95]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.7, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK10-NEXT: [[TMP101:%.*]] = icmp ne i32 [[TMP100]], 0 -// CHECK10-NEXT: br i1 [[TMP101]], label [[OMP_OFFLOAD_FAILED33:%.*]], label [[OMP_OFFLOAD_CONT34:%.*]] -// CHECK10: omp_offload.failed33: -// CHECK10-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l108(i64 [[TMP65]], i64 [[TMP1]], i32* [[VLA]], i64 [[TMP67]]) #[[ATTR3]] -// CHECK10-NEXT: br label [[OMP_OFFLOAD_CONT34]] -// CHECK10: omp_offload.cont34: -// CHECK10-NEXT: [[TMP102:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 -// CHECK10-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP102]]) +// CHECK10-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP83:%.*]] = load i32, i32* [[N]], align 4 +// CHECK10-NEXT: store i32 [[TMP83]], i32* [[DOTCAPTURE_EXPR_25]], align 4 +// CHECK10-NEXT: [[TMP84:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_25]], align 4 +// CHECK10-NEXT: [[SUB27:%.*]] = sub nsw i32 [[TMP84]], 0 +// CHECK10-NEXT: [[DIV28:%.*]] = sdiv i32 [[SUB27]], 1 +// CHECK10-NEXT: [[SUB29:%.*]] = sub nsw i32 [[DIV28]], 1 +// CHECK10-NEXT: store i32 [[SUB29]], i32* [[DOTCAPTURE_EXPR_26]], align 4 +// CHECK10-NEXT: [[TMP85:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_26]], align 4 +// CHECK10-NEXT: [[ADD30:%.*]] = add nsw i32 [[TMP85]], 1 +// CHECK10-NEXT: [[TMP86:%.*]] = zext i32 [[ADD30]] to i64 +// CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP86]]) +// CHECK10-NEXT: [[TMP87:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l108.region_id, i32 4, i8** [[TMP81]], i8** [[TMP82]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK10-NEXT: [[TMP88:%.*]] = icmp ne i32 [[TMP87]], 0 +// CHECK10-NEXT: br i1 [[TMP88]], label [[OMP_OFFLOAD_FAILED31:%.*]], label [[OMP_OFFLOAD_CONT32:%.*]] +// CHECK10: omp_offload.failed31: +// CHECK10-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l108(i64 [[TMP57]], i64 [[TMP1]], i32* [[VLA]], i64 [[TMP59]]) #[[ATTR3]] +// CHECK10-NEXT: br label [[OMP_OFFLOAD_CONT32]] +// CHECK10: omp_offload.cont32: +// CHECK10-NEXT: [[TMP89:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 +// CHECK10-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP89]]) // CHECK10-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK10-NEXT: [[TMP103:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK10-NEXT: call void @llvm.stackrestore(i8* [[TMP103]]) -// CHECK10-NEXT: [[TMP104:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK10-NEXT: ret i32 [[TMP104]] +// CHECK10-NEXT: [[TMP90:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK10-NEXT: call void @llvm.stackrestore(i8* [[TMP90]]) +// CHECK10-NEXT: [[TMP91:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK10-NEXT: ret i32 [[TMP91]] // // // CHECK10-LABEL: define 
{{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l100 @@ -5453,11 +5407,11 @@ // CHECK10-NEXT: [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK10-NEXT: store i32 [[TMP4]], i32* [[CONV3]], align 4 // CHECK10-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP5]]) +// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP5]]) // CHECK10-NEXT: ret void // // -// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..5 +// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..6 // CHECK10-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK10-NEXT: entry: // CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -5544,7 +5498,7 @@ // CHECK10-NEXT: [[CONV9:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK10-NEXT: store i32 [[TMP22]], i32* [[CONV9]], align 4, !llvm.access.group !24 // CHECK10-NEXT: [[TMP23:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !24 -// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*, i64)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i64 [[TMP17]], i64 [[TMP19]], i64 [[TMP21]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP23]]), !llvm.access.group !24 +// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*, i64)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i64 [[TMP17]], i64 [[TMP19]], i64 [[TMP21]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP23]]), !llvm.access.group !24 // CHECK10-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK10: omp.inner.for.inc: // CHECK10-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24 @@ -5598,7 +5552,7 @@ // CHECK10-NEXT: ret void // // -// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..6 +// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..7 // CHECK10-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK10-NEXT: entry: // CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -5751,7 +5705,7 @@ // CHECK10-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK10-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK10-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l81.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK10-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l81.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK10-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK10-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK10: omp_offload.failed: @@ -5769,7 +5723,7 @@ // CHECK10-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0 // CHECK10-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0 // CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK10-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l85.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.13, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.14, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK10-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l85.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x 
i64]* @.offload_sizes.16, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK10-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 // CHECK10-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]] // CHECK10: omp_offload.failed5: @@ -5801,7 +5755,7 @@ // CHECK10-NEXT: [[TMP31:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0 // CHECK10-NEXT: [[TMP32:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0 // CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK10-NEXT: [[TMP33:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l89.region_id, i32 2, i8** [[TMP31]], i8** [[TMP32]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.17, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.18, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK10-NEXT: [[TMP33:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l89.region_id, i32 2, i8** [[TMP31]], i8** [[TMP32]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.20, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.21, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK10-NEXT: [[TMP34:%.*]] = icmp ne i32 [[TMP33]], 0 // CHECK10-NEXT: br i1 [[TMP34]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]] // CHECK10: omp_offload.failed11: @@ -5817,11 +5771,11 @@ // CHECK10-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK10-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 // CHECK10-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 -// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..8 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK10-NEXT: ret void // // -// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..8 +// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..10 // CHECK10-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK10-NEXT: entry: // CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -5869,7 +5823,7 @@ // CHECK10-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 // CHECK10-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !30 // CHECK10-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]), !llvm.access.group !30 +// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]), !llvm.access.group !30 // CHECK10-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK10: omp.inner.for.inc: // CHECK10-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !30 @@ -5891,7 +5845,7 @@ // CHECK10-NEXT: ret void // // -// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..9 +// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..11 // CHECK10-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK10-NEXT: entry: // CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -5981,11 +5935,11 @@ // CHECK10-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK10-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 // CHECK10-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 -// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK10-NEXT: ret void // // -// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..14 // CHECK10-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK10-NEXT: entry: // CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -6033,7 +5987,7 @@ // CHECK10-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 // CHECK10-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !36 // CHECK10-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..12 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]), !llvm.access.group !36 +// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]), !llvm.access.group !36 // CHECK10-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK10: omp.inner.for.inc: // CHECK10-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36 @@ -6055,7 +6009,7 @@ // CHECK10-NEXT: ret void // // -// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..12 +// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..15 // CHECK10-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK10-NEXT: entry: // CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -6153,11 +6107,11 @@ // CHECK10-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK10-NEXT: store i32 [[TMP1]], i32* [[CONV1]], align 4 // CHECK10-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..15 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP2]]) +// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..18 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP2]]) // CHECK10-NEXT: ret void // // -// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..15 +// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..18 // CHECK10-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK10-NEXT: entry: // CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -6213,7 +6167,7 @@ // CHECK10-NEXT: [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK10-NEXT: store i32 [[TMP12]], i32* [[CONV2]], align 4, !llvm.access.group !42 // CHECK10-NEXT: [[TMP13:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !42 -// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]]), !llvm.access.group !42 +// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..19 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]]), !llvm.access.group !42 // CHECK10-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK10: omp.inner.for.inc: // CHECK10-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !42 @@ -6256,7 +6210,7 @@ // CHECK10-NEXT: ret void // // -// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..16 +// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..19 // CHECK10-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK10-NEXT: entry: // CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -6364,7 +6318,6 @@ // CHECK11-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK11-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK11-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4 -// CHECK11-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 4 // CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -6372,20 +6325,18 @@ // CHECK11-NEXT: [[DOTOFFLOAD_BASEPTRS4:%.*]] = alloca [3 x i8*], align 4 // CHECK11-NEXT: [[DOTOFFLOAD_PTRS5:%.*]] = alloca [3 x i8*], align 4 // CHECK11-NEXT: [[DOTOFFLOAD_MAPPERS6:%.*]] = alloca [3 x i8*], align 4 -// CHECK11-NEXT: [[DOTOFFLOAD_SIZES7:%.*]] = alloca [3 x i64], align 4 -// CHECK11-NEXT: [[_TMP8:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[_TMP7:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTCAPTURE_EXPR_8:%.*]] = alloca i32, align 4 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTCAPTURE_EXPR_17:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[N_CASTED18:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTCAPTURE_EXPR_16:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[N_CASTED17:%.*]] = alloca i32, align 4 // CHECK11-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOFFLOAD_BASEPTRS19:%.*]] = alloca [4 x i8*], align 4 -// CHECK11-NEXT: [[DOTOFFLOAD_PTRS20:%.*]] = alloca [4 x i8*], align 4 -// CHECK11-NEXT: [[DOTOFFLOAD_MAPPERS21:%.*]] = alloca [4 x i8*], align 4 -// CHECK11-NEXT: [[DOTOFFLOAD_SIZES22:%.*]] = alloca [4 x i64], align 4 -// CHECK11-NEXT: [[_TMP23:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTCAPTURE_EXPR_24:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTCAPTURE_EXPR_25:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOFFLOAD_BASEPTRS18:%.*]] = alloca [4 x i8*], align 4 +// CHECK11-NEXT: [[DOTOFFLOAD_PTRS19:%.*]] = alloca [4 x i8*], align 4 +// CHECK11-NEXT: [[DOTOFFLOAD_MAPPERS20:%.*]] = alloca [4 x i8*], align 4 +// CHECK11-NEXT: [[_TMP21:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTCAPTURE_EXPR_22:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTCAPTURE_EXPR_23:%.*]] = alloca i32, align 4 // CHECK11-NEXT: store i32 0, i32* [[RETVAL]], align 4 // CHECK11-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 // CHECK11-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 4 @@ -6407,185 +6358,165 @@ // 
CHECK11-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK11-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i32* // CHECK11-NEXT: store i32 [[TMP3]], i32* [[TMP9]], align 4 -// CHECK11-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK11-NEXT: store i64 4, i64* [[TMP10]], align 4 -// CHECK11-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK11-NEXT: store i8* null, i8** [[TMP11]], align 4 -// CHECK11-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK11-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32* -// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP13]], align 4 -// CHECK11-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK11-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32* -// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP15]], align 4 -// CHECK11-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK11-NEXT: store i64 4, i64* [[TMP16]], align 4 -// CHECK11-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK11-NEXT: store i8* null, i8** [[TMP17]], align 4 -// CHECK11-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK11-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK11-NEXT: store i8* null, i8** [[TMP10]], align 4 +// CHECK11-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK11-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i32* +// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP12]], align 4 +// CHECK11-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK11-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i32* +// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP14]], align 4 +// CHECK11-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK11-NEXT: store i8* null, i8** [[TMP15]], align 4 +// CHECK11-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK11-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** +// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 4 +// CHECK11-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK11-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** // CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 4 -// CHECK11-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK11-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** -// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 4 -// CHECK11-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK11-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 4 -// CHECK11-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK11-NEXT: store i8* null, i8** [[TMP23]], align 4 -// CHECK11-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// 
CHECK11-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4 -// CHECK11-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK11-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 +// CHECK11-NEXT: store i64 [[TMP5]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 2), align 4 +// CHECK11-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK11-NEXT: store i8* null, i8** [[TMP20]], align 4 +// CHECK11-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP23:%.*]] = load i32, i32* [[N]], align 4 +// CHECK11-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK11-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK11-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK11-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK11-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK11-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1 -// CHECK11-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 -// CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP30]]) -// CHECK11-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l100.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK11-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 -// CHECK11-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK11-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], 1 +// CHECK11-NEXT: [[TMP26:%.*]] = zext i32 [[ADD]] to i64 +// CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP26]]) +// CHECK11-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l100.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK11-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK11-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK11: omp_offload.failed: // CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l100(i32 [[TMP3]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK11: omp_offload.cont: -// CHECK11-NEXT: [[TMP33:%.*]] = load i32, i32* [[N]], align 4 -// CHECK11-NEXT: store i32 
[[TMP33]], i32* [[N_CASTED3]], align 4 -// CHECK11-NEXT: [[TMP34:%.*]] = load i32, i32* [[N_CASTED3]], align 4 -// CHECK11-NEXT: [[TMP35:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK11-NEXT: [[TMP36:%.*]] = sext i32 [[TMP35]] to i64 -// CHECK11-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i32* -// CHECK11-NEXT: store i32 [[TMP34]], i32* [[TMP38]], align 4 -// CHECK11-NEXT: [[TMP39:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i32* -// CHECK11-NEXT: store i32 [[TMP34]], i32* [[TMP40]], align 4 -// CHECK11-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 0 -// CHECK11-NEXT: store i64 4, i64* [[TMP41]], align 4 -// CHECK11-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP29:%.*]] = load i32, i32* [[N]], align 4 +// CHECK11-NEXT: store i32 [[TMP29]], i32* [[N_CASTED3]], align 4 +// CHECK11-NEXT: [[TMP30:%.*]] = load i32, i32* [[N_CASTED3]], align 4 +// CHECK11-NEXT: [[TMP31:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK11-NEXT: [[TMP32:%.*]] = sext i32 [[TMP31]] to i64 +// CHECK11-NEXT: [[TMP33:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i32* +// CHECK11-NEXT: store i32 [[TMP30]], i32* [[TMP34]], align 4 +// CHECK11-NEXT: [[TMP35:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP36:%.*]] = bitcast i8** [[TMP35]] to i32* +// CHECK11-NEXT: store i32 [[TMP30]], i32* [[TMP36]], align 4 +// CHECK11-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0 +// CHECK11-NEXT: store i8* null, i8** [[TMP37]], align 4 +// CHECK11-NEXT: [[TMP38:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1 +// CHECK11-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i32* +// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP39]], align 4 +// CHECK11-NEXT: [[TMP40:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 1 +// CHECK11-NEXT: [[TMP41:%.*]] = bitcast i8** [[TMP40]] to i32* +// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP41]], align 4 +// CHECK11-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1 // CHECK11-NEXT: store i8* null, i8** [[TMP42]], align 4 -// CHECK11-NEXT: [[TMP43:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1 -// CHECK11-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32* -// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP44]], align 4 -// CHECK11-NEXT: [[TMP45:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 1 -// CHECK11-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32* -// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP46]], align 4 -// CHECK11-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 1 -// CHECK11-NEXT: store i64 4, i64* [[TMP47]], align 4 -// CHECK11-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1 -// CHECK11-NEXT: store i8* null, i8** [[TMP48]], align 4 -// CHECK11-NEXT: [[TMP49:%.*]] = getelementptr inbounds [3 x i8*], [3 x 
i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2 -// CHECK11-NEXT: [[TMP50:%.*]] = bitcast i8** [[TMP49]] to i32** -// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP50]], align 4 -// CHECK11-NEXT: [[TMP51:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 2 -// CHECK11-NEXT: [[TMP52:%.*]] = bitcast i8** [[TMP51]] to i32** -// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP52]], align 4 -// CHECK11-NEXT: [[TMP53:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 2 -// CHECK11-NEXT: store i64 [[TMP36]], i64* [[TMP53]], align 4 -// CHECK11-NEXT: [[TMP54:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 2 -// CHECK11-NEXT: store i8* null, i8** [[TMP54]], align 4 -// CHECK11-NEXT: [[TMP55:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP56:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP57:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP58:%.*]] = load i32, i32* [[N]], align 4 -// CHECK11-NEXT: store i32 [[TMP58]], i32* [[DOTCAPTURE_EXPR_9]], align 4 -// CHECK11-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 -// CHECK11-NEXT: [[SUB11:%.*]] = sub nsw i32 [[TMP59]], 0 -// CHECK11-NEXT: [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1 -// CHECK11-NEXT: [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1 -// CHECK11-NEXT: store i32 [[SUB13]], i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK11-NEXT: [[TMP60:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK11-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP60]], 1 -// CHECK11-NEXT: [[TMP61:%.*]] = zext i32 [[ADD14]] to i64 -// CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP61]]) -// CHECK11-NEXT: [[TMP62:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l104.region_id, i32 3, i8** [[TMP55]], i8** [[TMP56]], i64* [[TMP57]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK11-NEXT: [[TMP63:%.*]] = icmp ne i32 [[TMP62]], 0 -// CHECK11-NEXT: br i1 [[TMP63]], label [[OMP_OFFLOAD_FAILED15:%.*]], label [[OMP_OFFLOAD_CONT16:%.*]] -// CHECK11: omp_offload.failed15: -// CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l104(i32 [[TMP34]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] -// CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT16]] -// CHECK11: omp_offload.cont16: -// CHECK11-NEXT: [[TMP64:%.*]] = load i32, i32* [[M]], align 4 -// CHECK11-NEXT: store i32 [[TMP64]], i32* [[DOTCAPTURE_EXPR_17]], align 4 -// CHECK11-NEXT: [[TMP65:%.*]] = load i32, i32* [[N]], align 4 -// CHECK11-NEXT: store i32 [[TMP65]], i32* [[N_CASTED18]], align 4 -// CHECK11-NEXT: [[TMP66:%.*]] = load i32, i32* [[N_CASTED18]], align 4 -// CHECK11-NEXT: [[TMP67:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_17]], align 4 -// CHECK11-NEXT: store i32 [[TMP67]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK11-NEXT: [[TMP68:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK11-NEXT: [[TMP69:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK11-NEXT: [[TMP70:%.*]] = sext i32 [[TMP69]] to i64 -// CHECK11-NEXT: [[TMP71:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP72:%.*]] = bitcast i8** 
[[TMP71]] to i32* -// CHECK11-NEXT: store i32 [[TMP66]], i32* [[TMP72]], align 4 -// CHECK11-NEXT: [[TMP73:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP74:%.*]] = bitcast i8** [[TMP73]] to i32* -// CHECK11-NEXT: store i32 [[TMP66]], i32* [[TMP74]], align 4 -// CHECK11-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 0 -// CHECK11-NEXT: store i64 4, i64* [[TMP75]], align 4 -// CHECK11-NEXT: [[TMP76:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 0 -// CHECK11-NEXT: store i8* null, i8** [[TMP76]], align 4 -// CHECK11-NEXT: [[TMP77:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 1 -// CHECK11-NEXT: [[TMP78:%.*]] = bitcast i8** [[TMP77]] to i32* -// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP78]], align 4 -// CHECK11-NEXT: [[TMP79:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 1 -// CHECK11-NEXT: [[TMP80:%.*]] = bitcast i8** [[TMP79]] to i32* -// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP80]], align 4 -// CHECK11-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 1 -// CHECK11-NEXT: store i64 4, i64* [[TMP81]], align 4 -// CHECK11-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 1 +// CHECK11-NEXT: [[TMP43:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2 +// CHECK11-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32** +// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP44]], align 4 +// CHECK11-NEXT: [[TMP45:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 2 +// CHECK11-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32** +// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP46]], align 4 +// CHECK11-NEXT: store i64 [[TMP32]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 2), align 4 +// CHECK11-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 2 +// CHECK11-NEXT: store i8* null, i8** [[TMP47]], align 4 +// CHECK11-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP49:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP50:%.*]] = load i32, i32* [[N]], align 4 +// CHECK11-NEXT: store i32 [[TMP50]], i32* [[DOTCAPTURE_EXPR_8]], align 4 +// CHECK11-NEXT: [[TMP51:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_8]], align 4 +// CHECK11-NEXT: [[SUB10:%.*]] = sub nsw i32 [[TMP51]], 0 +// CHECK11-NEXT: [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1 +// CHECK11-NEXT: [[SUB12:%.*]] = sub nsw i32 [[DIV11]], 1 +// CHECK11-NEXT: store i32 [[SUB12]], i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK11-NEXT: [[TMP52:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK11-NEXT: [[ADD13:%.*]] = add nsw i32 [[TMP52]], 1 +// CHECK11-NEXT: [[TMP53:%.*]] = zext i32 [[ADD13]] to i64 +// CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP53]]) +// CHECK11-NEXT: [[TMP54:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l104.region_id, i32 3, i8** [[TMP48]], i8** [[TMP49]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, 
i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK11-NEXT: [[TMP55:%.*]] = icmp ne i32 [[TMP54]], 0 +// CHECK11-NEXT: br i1 [[TMP55]], label [[OMP_OFFLOAD_FAILED14:%.*]], label [[OMP_OFFLOAD_CONT15:%.*]] +// CHECK11: omp_offload.failed14: +// CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l104(i32 [[TMP30]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] +// CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT15]] +// CHECK11: omp_offload.cont15: +// CHECK11-NEXT: [[TMP56:%.*]] = load i32, i32* [[M]], align 4 +// CHECK11-NEXT: store i32 [[TMP56]], i32* [[DOTCAPTURE_EXPR_16]], align 4 +// CHECK11-NEXT: [[TMP57:%.*]] = load i32, i32* [[N]], align 4 +// CHECK11-NEXT: store i32 [[TMP57]], i32* [[N_CASTED17]], align 4 +// CHECK11-NEXT: [[TMP58:%.*]] = load i32, i32* [[N_CASTED17]], align 4 +// CHECK11-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_16]], align 4 +// CHECK11-NEXT: store i32 [[TMP59]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 +// CHECK11-NEXT: [[TMP60:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 +// CHECK11-NEXT: [[TMP61:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK11-NEXT: [[TMP62:%.*]] = sext i32 [[TMP61]] to i64 +// CHECK11-NEXT: [[TMP63:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP64:%.*]] = bitcast i8** [[TMP63]] to i32* +// CHECK11-NEXT: store i32 [[TMP58]], i32* [[TMP64]], align 4 +// CHECK11-NEXT: [[TMP65:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP66:%.*]] = bitcast i8** [[TMP65]] to i32* +// CHECK11-NEXT: store i32 [[TMP58]], i32* [[TMP66]], align 4 +// CHECK11-NEXT: [[TMP67:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 0 +// CHECK11-NEXT: store i8* null, i8** [[TMP67]], align 4 +// CHECK11-NEXT: [[TMP68:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 1 +// CHECK11-NEXT: [[TMP69:%.*]] = bitcast i8** [[TMP68]] to i32* +// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP69]], align 4 +// CHECK11-NEXT: [[TMP70:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 1 +// CHECK11-NEXT: [[TMP71:%.*]] = bitcast i8** [[TMP70]] to i32* +// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP71]], align 4 +// CHECK11-NEXT: [[TMP72:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 1 +// CHECK11-NEXT: store i8* null, i8** [[TMP72]], align 4 +// CHECK11-NEXT: [[TMP73:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 2 +// CHECK11-NEXT: [[TMP74:%.*]] = bitcast i8** [[TMP73]] to i32** +// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP74]], align 4 +// CHECK11-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 2 +// CHECK11-NEXT: [[TMP76:%.*]] = bitcast i8** [[TMP75]] to i32** +// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP76]], align 4 +// CHECK11-NEXT: store i64 [[TMP62]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 2), align 4 +// CHECK11-NEXT: [[TMP77:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 2 +// CHECK11-NEXT: store i8* null, i8** [[TMP77]], align 4 +// CHECK11-NEXT: [[TMP78:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 3 +// CHECK11-NEXT: [[TMP79:%.*]] = bitcast i8** 
[[TMP78]] to i32* +// CHECK11-NEXT: store i32 [[TMP60]], i32* [[TMP79]], align 4 +// CHECK11-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 3 +// CHECK11-NEXT: [[TMP81:%.*]] = bitcast i8** [[TMP80]] to i32* +// CHECK11-NEXT: store i32 [[TMP60]], i32* [[TMP81]], align 4 +// CHECK11-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 3 // CHECK11-NEXT: store i8* null, i8** [[TMP82]], align 4 -// CHECK11-NEXT: [[TMP83:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 2 -// CHECK11-NEXT: [[TMP84:%.*]] = bitcast i8** [[TMP83]] to i32** -// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP84]], align 4 -// CHECK11-NEXT: [[TMP85:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 2 -// CHECK11-NEXT: [[TMP86:%.*]] = bitcast i8** [[TMP85]] to i32** -// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP86]], align 4 -// CHECK11-NEXT: [[TMP87:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 2 -// CHECK11-NEXT: store i64 [[TMP70]], i64* [[TMP87]], align 4 -// CHECK11-NEXT: [[TMP88:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 2 -// CHECK11-NEXT: store i8* null, i8** [[TMP88]], align 4 -// CHECK11-NEXT: [[TMP89:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 3 -// CHECK11-NEXT: [[TMP90:%.*]] = bitcast i8** [[TMP89]] to i32* -// CHECK11-NEXT: store i32 [[TMP68]], i32* [[TMP90]], align 4 -// CHECK11-NEXT: [[TMP91:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 3 -// CHECK11-NEXT: [[TMP92:%.*]] = bitcast i8** [[TMP91]] to i32* -// CHECK11-NEXT: store i32 [[TMP68]], i32* [[TMP92]], align 4 -// CHECK11-NEXT: [[TMP93:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 3 -// CHECK11-NEXT: store i64 4, i64* [[TMP93]], align 4 -// CHECK11-NEXT: [[TMP94:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 3 -// CHECK11-NEXT: store i8* null, i8** [[TMP94]], align 4 -// CHECK11-NEXT: [[TMP95:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP96:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP97:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP98:%.*]] = load i32, i32* [[N]], align 4 -// CHECK11-NEXT: store i32 [[TMP98]], i32* [[DOTCAPTURE_EXPR_24]], align 4 -// CHECK11-NEXT: [[TMP99:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_24]], align 4 -// CHECK11-NEXT: [[SUB26:%.*]] = sub nsw i32 [[TMP99]], 0 -// CHECK11-NEXT: [[DIV27:%.*]] = sdiv i32 [[SUB26]], 1 -// CHECK11-NEXT: [[SUB28:%.*]] = sub nsw i32 [[DIV27]], 1 -// CHECK11-NEXT: store i32 [[SUB28]], i32* [[DOTCAPTURE_EXPR_25]], align 4 -// CHECK11-NEXT: [[TMP100:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_25]], align 4 -// CHECK11-NEXT: [[ADD29:%.*]] = add nsw i32 [[TMP100]], 1 -// CHECK11-NEXT: [[TMP101:%.*]] = zext i32 [[ADD29]] to i64 -// CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP101]]) -// CHECK11-NEXT: [[TMP102:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l108.region_id, i32 4, i8** [[TMP95]], i8** [[TMP96]], i64* 
[[TMP97]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.7, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK11-NEXT: [[TMP103:%.*]] = icmp ne i32 [[TMP102]], 0 -// CHECK11-NEXT: br i1 [[TMP103]], label [[OMP_OFFLOAD_FAILED30:%.*]], label [[OMP_OFFLOAD_CONT31:%.*]] -// CHECK11: omp_offload.failed30: -// CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l108(i32 [[TMP66]], i32 [[TMP0]], i32* [[VLA]], i32 [[TMP68]]) #[[ATTR3]] -// CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT31]] -// CHECK11: omp_offload.cont31: -// CHECK11-NEXT: [[TMP104:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 -// CHECK11-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP104]]) +// CHECK11-NEXT: [[TMP83:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP84:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP85:%.*]] = load i32, i32* [[N]], align 4 +// CHECK11-NEXT: store i32 [[TMP85]], i32* [[DOTCAPTURE_EXPR_22]], align 4 +// CHECK11-NEXT: [[TMP86:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_22]], align 4 +// CHECK11-NEXT: [[SUB24:%.*]] = sub nsw i32 [[TMP86]], 0 +// CHECK11-NEXT: [[DIV25:%.*]] = sdiv i32 [[SUB24]], 1 +// CHECK11-NEXT: [[SUB26:%.*]] = sub nsw i32 [[DIV25]], 1 +// CHECK11-NEXT: store i32 [[SUB26]], i32* [[DOTCAPTURE_EXPR_23]], align 4 +// CHECK11-NEXT: [[TMP87:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_23]], align 4 +// CHECK11-NEXT: [[ADD27:%.*]] = add nsw i32 [[TMP87]], 1 +// CHECK11-NEXT: [[TMP88:%.*]] = zext i32 [[ADD27]] to i64 +// CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP88]]) +// CHECK11-NEXT: [[TMP89:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l108.region_id, i32 4, i8** [[TMP83]], i8** [[TMP84]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK11-NEXT: [[TMP90:%.*]] = icmp ne i32 [[TMP89]], 0 +// CHECK11-NEXT: br i1 [[TMP90]], label [[OMP_OFFLOAD_FAILED28:%.*]], label [[OMP_OFFLOAD_CONT29:%.*]] +// CHECK11: omp_offload.failed28: +// CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l108(i32 [[TMP58]], i32 [[TMP0]], i32* [[VLA]], i32 [[TMP60]]) #[[ATTR3]] +// CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT29]] +// CHECK11: omp_offload.cont29: +// CHECK11-NEXT: [[TMP91:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 +// CHECK11-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP91]]) // CHECK11-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK11-NEXT: [[TMP105:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK11-NEXT: call void @llvm.stackrestore(i8* [[TMP105]]) -// CHECK11-NEXT: [[TMP106:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK11-NEXT: ret i32 [[TMP106]] +// CHECK11-NEXT: [[TMP92:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK11-NEXT: call void @llvm.stackrestore(i8* [[TMP92]]) +// CHECK11-NEXT: [[TMP93:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK11-NEXT: ret i32 [[TMP93]] // // // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l100 @@ -7081,11 +7012,11 @@ // CHECK11-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK11-NEXT: store i32 
[[TMP4]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK11-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP5]]) +// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP5]]) // CHECK11-NEXT: ret void // // -// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..5 +// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..6 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK11-NEXT: entry: // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -7166,7 +7097,7 @@ // CHECK11-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4, !llvm.access.group !25 // CHECK11-NEXT: store i32 [[TMP20]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !25 // CHECK11-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !25 -// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*, i32)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32 [[TMP16]], i32 [[TMP17]], i32 [[TMP19]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP21]]), !llvm.access.group !25 +// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*, i32)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i32 [[TMP16]], i32 [[TMP17]], i32 [[TMP19]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP21]]), !llvm.access.group !25 // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK11: omp.inner.for.inc: // CHECK11-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25 @@ -7220,7 +7151,7 @@ // CHECK11-NEXT: ret void // // -// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..6 +// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..7 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32 noundef [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK11-NEXT: entry: // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -7368,7 +7299,7 @@ // CHECK11-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK11-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK11-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l81.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK11-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l81.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK11-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK11-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK11: omp_offload.failed: @@ -7386,7 +7317,7 @@ // CHECK11-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0 // CHECK11-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0 // CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK11-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l85.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.13, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.14, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK11-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l85.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x 
i64]* @.offload_sizes.16, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK11-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 // CHECK11-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]] // CHECK11: omp_offload.failed5: @@ -7417,7 +7348,7 @@ // CHECK11-NEXT: [[TMP31:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0 // CHECK11-NEXT: [[TMP32:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0 // CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK11-NEXT: [[TMP33:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l89.region_id, i32 2, i8** [[TMP31]], i8** [[TMP32]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.17, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.18, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK11-NEXT: [[TMP33:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l89.region_id, i32 2, i8** [[TMP31]], i8** [[TMP32]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.20, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.21, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK11-NEXT: [[TMP34:%.*]] = icmp ne i32 [[TMP33]], 0 // CHECK11-NEXT: br i1 [[TMP34]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]] // CHECK11: omp_offload.failed11: @@ -7433,11 +7364,11 @@ // CHECK11-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK11-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 // CHECK11-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 -// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..8 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK11-NEXT: ret void // // -// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..8 +// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..10 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK11-NEXT: entry: // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -7483,7 +7414,7 @@ // CHECK11: omp.inner.for.body: // CHECK11-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !31 // CHECK11-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !31 -// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]), !llvm.access.group !31 +// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]), !llvm.access.group !31 // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK11: omp.inner.for.inc: // CHECK11-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !31 @@ -7505,7 +7436,7 @@ // CHECK11-NEXT: ret void // // -// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..9 +// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..11 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK11-NEXT: entry: // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -7592,11 +7523,11 @@ // CHECK11-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK11-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 // CHECK11-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 -// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK11-NEXT: ret void // // -// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..14 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK11-NEXT: entry: // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -7642,7 +7573,7 @@ // CHECK11: omp.inner.for.body: // CHECK11-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !37 // CHECK11-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !37 -// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..12 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]), !llvm.access.group !37 +// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]), !llvm.access.group !37 // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK11: omp.inner.for.inc: // CHECK11-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !37 @@ -7664,7 +7595,7 @@ // CHECK11-NEXT: ret void // // -// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..12 +// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..15 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK11-NEXT: entry: // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -7757,11 +7688,11 @@ // CHECK11-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK11-NEXT: store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK11-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..15 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP2]]) +// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..18 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP2]]) // CHECK11-NEXT: ret void // // -// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..15 +// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..18 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK11-NEXT: entry: // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -7813,7 +7744,7 @@ // CHECK11-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4, !llvm.access.group !43 // CHECK11-NEXT: store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !43 // CHECK11-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !43 -// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]]), !llvm.access.group !43 +// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..19 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]]), !llvm.access.group !43 // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK11: omp.inner.for.inc: // CHECK11-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !43 @@ -7856,7 +7787,7 @@ // CHECK11-NEXT: ret void // // -// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..16 +// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..19 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK11-NEXT: entry: // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -7960,7 +7891,6 @@ // CHECK12-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK12-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK12-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4 -// CHECK12-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 4 // CHECK12-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK12-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK12-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -7968,20 +7898,18 @@ // CHECK12-NEXT: [[DOTOFFLOAD_BASEPTRS4:%.*]] = alloca [3 x i8*], align 4 // CHECK12-NEXT: [[DOTOFFLOAD_PTRS5:%.*]] = alloca [3 x i8*], align 4 // CHECK12-NEXT: [[DOTOFFLOAD_MAPPERS6:%.*]] = alloca [3 x i8*], align 4 -// CHECK12-NEXT: [[DOTOFFLOAD_SIZES7:%.*]] = alloca [3 x i64], align 4 -// CHECK12-NEXT: [[_TMP8:%.*]] = alloca i32, align 4 +// CHECK12-NEXT: [[_TMP7:%.*]] = alloca i32, align 4 +// CHECK12-NEXT: [[DOTCAPTURE_EXPR_8:%.*]] = alloca i32, align 4 // CHECK12-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4 -// CHECK12-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4 -// CHECK12-NEXT: [[DOTCAPTURE_EXPR_17:%.*]] = alloca i32, align 4 -// CHECK12-NEXT: [[N_CASTED18:%.*]] = alloca i32, align 4 +// CHECK12-NEXT: [[DOTCAPTURE_EXPR_16:%.*]] = alloca i32, align 4 +// CHECK12-NEXT: [[N_CASTED17:%.*]] = alloca i32, align 4 // CHECK12-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4 -// CHECK12-NEXT: [[DOTOFFLOAD_BASEPTRS19:%.*]] = alloca [4 x i8*], align 4 -// CHECK12-NEXT: [[DOTOFFLOAD_PTRS20:%.*]] = alloca [4 x i8*], align 4 -// CHECK12-NEXT: [[DOTOFFLOAD_MAPPERS21:%.*]] = alloca [4 x i8*], align 4 -// CHECK12-NEXT: [[DOTOFFLOAD_SIZES22:%.*]] = alloca [4 x i64], align 4 -// CHECK12-NEXT: [[_TMP23:%.*]] = alloca i32, align 4 -// CHECK12-NEXT: [[DOTCAPTURE_EXPR_24:%.*]] = alloca i32, align 4 -// CHECK12-NEXT: [[DOTCAPTURE_EXPR_25:%.*]] = alloca i32, align 4 +// CHECK12-NEXT: [[DOTOFFLOAD_BASEPTRS18:%.*]] = alloca [4 x i8*], align 4 +// CHECK12-NEXT: [[DOTOFFLOAD_PTRS19:%.*]] = alloca [4 x i8*], align 4 +// CHECK12-NEXT: [[DOTOFFLOAD_MAPPERS20:%.*]] = alloca [4 x i8*], align 4 +// CHECK12-NEXT: [[_TMP21:%.*]] = alloca i32, align 4 +// CHECK12-NEXT: [[DOTCAPTURE_EXPR_22:%.*]] = alloca i32, align 4 +// CHECK12-NEXT: [[DOTCAPTURE_EXPR_23:%.*]] = alloca i32, align 4 // CHECK12-NEXT: store i32 0, i32* [[RETVAL]], align 4 // CHECK12-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 // CHECK12-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 4 @@ -8003,185 +7931,165 @@ // 
CHECK12-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK12-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i32* // CHECK12-NEXT: store i32 [[TMP3]], i32* [[TMP9]], align 4 -// CHECK12-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK12-NEXT: store i64 4, i64* [[TMP10]], align 4 -// CHECK12-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK12-NEXT: store i8* null, i8** [[TMP11]], align 4 -// CHECK12-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK12-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32* -// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP13]], align 4 -// CHECK12-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK12-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32* -// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP15]], align 4 -// CHECK12-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK12-NEXT: store i64 4, i64* [[TMP16]], align 4 -// CHECK12-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK12-NEXT: store i8* null, i8** [[TMP17]], align 4 -// CHECK12-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK12-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK12-NEXT: store i8* null, i8** [[TMP10]], align 4 +// CHECK12-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK12-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i32* +// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP12]], align 4 +// CHECK12-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK12-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i32* +// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP14]], align 4 +// CHECK12-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK12-NEXT: store i8* null, i8** [[TMP15]], align 4 +// CHECK12-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK12-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** +// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 4 +// CHECK12-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK12-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** // CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 4 -// CHECK12-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK12-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** -// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 4 -// CHECK12-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK12-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 4 -// CHECK12-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK12-NEXT: store i8* null, i8** [[TMP23]], align 4 -// CHECK12-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// 
CHECK12-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4 -// CHECK12-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK12-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK12-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 +// CHECK12-NEXT: store i64 [[TMP5]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 2), align 4 +// CHECK12-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK12-NEXT: store i8* null, i8** [[TMP20]], align 4 +// CHECK12-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP23:%.*]] = load i32, i32* [[N]], align 4 +// CHECK12-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK12-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK12-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK12-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK12-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK12-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK12-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK12-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1 -// CHECK12-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 -// CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP30]]) -// CHECK12-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l100.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK12-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 -// CHECK12-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK12-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK12-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], 1 +// CHECK12-NEXT: [[TMP26:%.*]] = zext i32 [[ADD]] to i64 +// CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP26]]) +// CHECK12-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l100.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK12-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK12-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK12: omp_offload.failed: // CHECK12-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l100(i32 [[TMP3]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK12-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK12: omp_offload.cont: -// CHECK12-NEXT: [[TMP33:%.*]] = load i32, i32* [[N]], align 4 -// CHECK12-NEXT: store i32 
[[TMP33]], i32* [[N_CASTED3]], align 4 -// CHECK12-NEXT: [[TMP34:%.*]] = load i32, i32* [[N_CASTED3]], align 4 -// CHECK12-NEXT: [[TMP35:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK12-NEXT: [[TMP36:%.*]] = sext i32 [[TMP35]] to i64 -// CHECK12-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i32* -// CHECK12-NEXT: store i32 [[TMP34]], i32* [[TMP38]], align 4 -// CHECK12-NEXT: [[TMP39:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i32* -// CHECK12-NEXT: store i32 [[TMP34]], i32* [[TMP40]], align 4 -// CHECK12-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 0 -// CHECK12-NEXT: store i64 4, i64* [[TMP41]], align 4 -// CHECK12-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP29:%.*]] = load i32, i32* [[N]], align 4 +// CHECK12-NEXT: store i32 [[TMP29]], i32* [[N_CASTED3]], align 4 +// CHECK12-NEXT: [[TMP30:%.*]] = load i32, i32* [[N_CASTED3]], align 4 +// CHECK12-NEXT: [[TMP31:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK12-NEXT: [[TMP32:%.*]] = sext i32 [[TMP31]] to i64 +// CHECK12-NEXT: [[TMP33:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i32* +// CHECK12-NEXT: store i32 [[TMP30]], i32* [[TMP34]], align 4 +// CHECK12-NEXT: [[TMP35:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP36:%.*]] = bitcast i8** [[TMP35]] to i32* +// CHECK12-NEXT: store i32 [[TMP30]], i32* [[TMP36]], align 4 +// CHECK12-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0 +// CHECK12-NEXT: store i8* null, i8** [[TMP37]], align 4 +// CHECK12-NEXT: [[TMP38:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1 +// CHECK12-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i32* +// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP39]], align 4 +// CHECK12-NEXT: [[TMP40:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 1 +// CHECK12-NEXT: [[TMP41:%.*]] = bitcast i8** [[TMP40]] to i32* +// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP41]], align 4 +// CHECK12-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1 // CHECK12-NEXT: store i8* null, i8** [[TMP42]], align 4 -// CHECK12-NEXT: [[TMP43:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1 -// CHECK12-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32* -// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP44]], align 4 -// CHECK12-NEXT: [[TMP45:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 1 -// CHECK12-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32* -// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP46]], align 4 -// CHECK12-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 1 -// CHECK12-NEXT: store i64 4, i64* [[TMP47]], align 4 -// CHECK12-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1 -// CHECK12-NEXT: store i8* null, i8** [[TMP48]], align 4 -// CHECK12-NEXT: [[TMP49:%.*]] = getelementptr inbounds [3 x i8*], [3 x 
i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2 -// CHECK12-NEXT: [[TMP50:%.*]] = bitcast i8** [[TMP49]] to i32** -// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP50]], align 4 -// CHECK12-NEXT: [[TMP51:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 2 -// CHECK12-NEXT: [[TMP52:%.*]] = bitcast i8** [[TMP51]] to i32** -// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP52]], align 4 -// CHECK12-NEXT: [[TMP53:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 2 -// CHECK12-NEXT: store i64 [[TMP36]], i64* [[TMP53]], align 4 -// CHECK12-NEXT: [[TMP54:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 2 -// CHECK12-NEXT: store i8* null, i8** [[TMP54]], align 4 -// CHECK12-NEXT: [[TMP55:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP56:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP57:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP58:%.*]] = load i32, i32* [[N]], align 4 -// CHECK12-NEXT: store i32 [[TMP58]], i32* [[DOTCAPTURE_EXPR_9]], align 4 -// CHECK12-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 -// CHECK12-NEXT: [[SUB11:%.*]] = sub nsw i32 [[TMP59]], 0 -// CHECK12-NEXT: [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1 -// CHECK12-NEXT: [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1 -// CHECK12-NEXT: store i32 [[SUB13]], i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK12-NEXT: [[TMP60:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK12-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP60]], 1 -// CHECK12-NEXT: [[TMP61:%.*]] = zext i32 [[ADD14]] to i64 -// CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP61]]) -// CHECK12-NEXT: [[TMP62:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l104.region_id, i32 3, i8** [[TMP55]], i8** [[TMP56]], i64* [[TMP57]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK12-NEXT: [[TMP63:%.*]] = icmp ne i32 [[TMP62]], 0 -// CHECK12-NEXT: br i1 [[TMP63]], label [[OMP_OFFLOAD_FAILED15:%.*]], label [[OMP_OFFLOAD_CONT16:%.*]] -// CHECK12: omp_offload.failed15: -// CHECK12-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l104(i32 [[TMP34]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] -// CHECK12-NEXT: br label [[OMP_OFFLOAD_CONT16]] -// CHECK12: omp_offload.cont16: -// CHECK12-NEXT: [[TMP64:%.*]] = load i32, i32* [[M]], align 4 -// CHECK12-NEXT: store i32 [[TMP64]], i32* [[DOTCAPTURE_EXPR_17]], align 4 -// CHECK12-NEXT: [[TMP65:%.*]] = load i32, i32* [[N]], align 4 -// CHECK12-NEXT: store i32 [[TMP65]], i32* [[N_CASTED18]], align 4 -// CHECK12-NEXT: [[TMP66:%.*]] = load i32, i32* [[N_CASTED18]], align 4 -// CHECK12-NEXT: [[TMP67:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_17]], align 4 -// CHECK12-NEXT: store i32 [[TMP67]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK12-NEXT: [[TMP68:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK12-NEXT: [[TMP69:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK12-NEXT: [[TMP70:%.*]] = sext i32 [[TMP69]] to i64 -// CHECK12-NEXT: [[TMP71:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP72:%.*]] = bitcast i8** 
[[TMP71]] to i32* -// CHECK12-NEXT: store i32 [[TMP66]], i32* [[TMP72]], align 4 -// CHECK12-NEXT: [[TMP73:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP74:%.*]] = bitcast i8** [[TMP73]] to i32* -// CHECK12-NEXT: store i32 [[TMP66]], i32* [[TMP74]], align 4 -// CHECK12-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 0 -// CHECK12-NEXT: store i64 4, i64* [[TMP75]], align 4 -// CHECK12-NEXT: [[TMP76:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 0 -// CHECK12-NEXT: store i8* null, i8** [[TMP76]], align 4 -// CHECK12-NEXT: [[TMP77:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 1 -// CHECK12-NEXT: [[TMP78:%.*]] = bitcast i8** [[TMP77]] to i32* -// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP78]], align 4 -// CHECK12-NEXT: [[TMP79:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 1 -// CHECK12-NEXT: [[TMP80:%.*]] = bitcast i8** [[TMP79]] to i32* -// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP80]], align 4 -// CHECK12-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 1 -// CHECK12-NEXT: store i64 4, i64* [[TMP81]], align 4 -// CHECK12-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 1 +// CHECK12-NEXT: [[TMP43:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2 +// CHECK12-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32** +// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP44]], align 4 +// CHECK12-NEXT: [[TMP45:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 2 +// CHECK12-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32** +// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP46]], align 4 +// CHECK12-NEXT: store i64 [[TMP32]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 2), align 4 +// CHECK12-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 2 +// CHECK12-NEXT: store i8* null, i8** [[TMP47]], align 4 +// CHECK12-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP49:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP50:%.*]] = load i32, i32* [[N]], align 4 +// CHECK12-NEXT: store i32 [[TMP50]], i32* [[DOTCAPTURE_EXPR_8]], align 4 +// CHECK12-NEXT: [[TMP51:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_8]], align 4 +// CHECK12-NEXT: [[SUB10:%.*]] = sub nsw i32 [[TMP51]], 0 +// CHECK12-NEXT: [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1 +// CHECK12-NEXT: [[SUB12:%.*]] = sub nsw i32 [[DIV11]], 1 +// CHECK12-NEXT: store i32 [[SUB12]], i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK12-NEXT: [[TMP52:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK12-NEXT: [[ADD13:%.*]] = add nsw i32 [[TMP52]], 1 +// CHECK12-NEXT: [[TMP53:%.*]] = zext i32 [[ADD13]] to i64 +// CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP53]]) +// CHECK12-NEXT: [[TMP54:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l104.region_id, i32 3, i8** [[TMP48]], i8** [[TMP49]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, 
i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK12-NEXT: [[TMP55:%.*]] = icmp ne i32 [[TMP54]], 0 +// CHECK12-NEXT: br i1 [[TMP55]], label [[OMP_OFFLOAD_FAILED14:%.*]], label [[OMP_OFFLOAD_CONT15:%.*]] +// CHECK12: omp_offload.failed14: +// CHECK12-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l104(i32 [[TMP30]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] +// CHECK12-NEXT: br label [[OMP_OFFLOAD_CONT15]] +// CHECK12: omp_offload.cont15: +// CHECK12-NEXT: [[TMP56:%.*]] = load i32, i32* [[M]], align 4 +// CHECK12-NEXT: store i32 [[TMP56]], i32* [[DOTCAPTURE_EXPR_16]], align 4 +// CHECK12-NEXT: [[TMP57:%.*]] = load i32, i32* [[N]], align 4 +// CHECK12-NEXT: store i32 [[TMP57]], i32* [[N_CASTED17]], align 4 +// CHECK12-NEXT: [[TMP58:%.*]] = load i32, i32* [[N_CASTED17]], align 4 +// CHECK12-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_16]], align 4 +// CHECK12-NEXT: store i32 [[TMP59]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 +// CHECK12-NEXT: [[TMP60:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 +// CHECK12-NEXT: [[TMP61:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK12-NEXT: [[TMP62:%.*]] = sext i32 [[TMP61]] to i64 +// CHECK12-NEXT: [[TMP63:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP64:%.*]] = bitcast i8** [[TMP63]] to i32* +// CHECK12-NEXT: store i32 [[TMP58]], i32* [[TMP64]], align 4 +// CHECK12-NEXT: [[TMP65:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP66:%.*]] = bitcast i8** [[TMP65]] to i32* +// CHECK12-NEXT: store i32 [[TMP58]], i32* [[TMP66]], align 4 +// CHECK12-NEXT: [[TMP67:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 0 +// CHECK12-NEXT: store i8* null, i8** [[TMP67]], align 4 +// CHECK12-NEXT: [[TMP68:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 1 +// CHECK12-NEXT: [[TMP69:%.*]] = bitcast i8** [[TMP68]] to i32* +// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP69]], align 4 +// CHECK12-NEXT: [[TMP70:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 1 +// CHECK12-NEXT: [[TMP71:%.*]] = bitcast i8** [[TMP70]] to i32* +// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP71]], align 4 +// CHECK12-NEXT: [[TMP72:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 1 +// CHECK12-NEXT: store i8* null, i8** [[TMP72]], align 4 +// CHECK12-NEXT: [[TMP73:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 2 +// CHECK12-NEXT: [[TMP74:%.*]] = bitcast i8** [[TMP73]] to i32** +// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP74]], align 4 +// CHECK12-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 2 +// CHECK12-NEXT: [[TMP76:%.*]] = bitcast i8** [[TMP75]] to i32** +// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP76]], align 4 +// CHECK12-NEXT: store i64 [[TMP62]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 2), align 4 +// CHECK12-NEXT: [[TMP77:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 2 +// CHECK12-NEXT: store i8* null, i8** [[TMP77]], align 4 +// CHECK12-NEXT: [[TMP78:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 3 +// CHECK12-NEXT: [[TMP79:%.*]] = bitcast i8** 
[[TMP78]] to i32* +// CHECK12-NEXT: store i32 [[TMP60]], i32* [[TMP79]], align 4 +// CHECK12-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 3 +// CHECK12-NEXT: [[TMP81:%.*]] = bitcast i8** [[TMP80]] to i32* +// CHECK12-NEXT: store i32 [[TMP60]], i32* [[TMP81]], align 4 +// CHECK12-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 3 // CHECK12-NEXT: store i8* null, i8** [[TMP82]], align 4 -// CHECK12-NEXT: [[TMP83:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 2 -// CHECK12-NEXT: [[TMP84:%.*]] = bitcast i8** [[TMP83]] to i32** -// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP84]], align 4 -// CHECK12-NEXT: [[TMP85:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 2 -// CHECK12-NEXT: [[TMP86:%.*]] = bitcast i8** [[TMP85]] to i32** -// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP86]], align 4 -// CHECK12-NEXT: [[TMP87:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 2 -// CHECK12-NEXT: store i64 [[TMP70]], i64* [[TMP87]], align 4 -// CHECK12-NEXT: [[TMP88:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 2 -// CHECK12-NEXT: store i8* null, i8** [[TMP88]], align 4 -// CHECK12-NEXT: [[TMP89:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 3 -// CHECK12-NEXT: [[TMP90:%.*]] = bitcast i8** [[TMP89]] to i32* -// CHECK12-NEXT: store i32 [[TMP68]], i32* [[TMP90]], align 4 -// CHECK12-NEXT: [[TMP91:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 3 -// CHECK12-NEXT: [[TMP92:%.*]] = bitcast i8** [[TMP91]] to i32* -// CHECK12-NEXT: store i32 [[TMP68]], i32* [[TMP92]], align 4 -// CHECK12-NEXT: [[TMP93:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 3 -// CHECK12-NEXT: store i64 4, i64* [[TMP93]], align 4 -// CHECK12-NEXT: [[TMP94:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 3 -// CHECK12-NEXT: store i8* null, i8** [[TMP94]], align 4 -// CHECK12-NEXT: [[TMP95:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP96:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP97:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP98:%.*]] = load i32, i32* [[N]], align 4 -// CHECK12-NEXT: store i32 [[TMP98]], i32* [[DOTCAPTURE_EXPR_24]], align 4 -// CHECK12-NEXT: [[TMP99:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_24]], align 4 -// CHECK12-NEXT: [[SUB26:%.*]] = sub nsw i32 [[TMP99]], 0 -// CHECK12-NEXT: [[DIV27:%.*]] = sdiv i32 [[SUB26]], 1 -// CHECK12-NEXT: [[SUB28:%.*]] = sub nsw i32 [[DIV27]], 1 -// CHECK12-NEXT: store i32 [[SUB28]], i32* [[DOTCAPTURE_EXPR_25]], align 4 -// CHECK12-NEXT: [[TMP100:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_25]], align 4 -// CHECK12-NEXT: [[ADD29:%.*]] = add nsw i32 [[TMP100]], 1 -// CHECK12-NEXT: [[TMP101:%.*]] = zext i32 [[ADD29]] to i64 -// CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP101]]) -// CHECK12-NEXT: [[TMP102:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l108.region_id, i32 4, i8** [[TMP95]], i8** [[TMP96]], i64* 
[[TMP97]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.7, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK12-NEXT: [[TMP103:%.*]] = icmp ne i32 [[TMP102]], 0 -// CHECK12-NEXT: br i1 [[TMP103]], label [[OMP_OFFLOAD_FAILED30:%.*]], label [[OMP_OFFLOAD_CONT31:%.*]] -// CHECK12: omp_offload.failed30: -// CHECK12-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l108(i32 [[TMP66]], i32 [[TMP0]], i32* [[VLA]], i32 [[TMP68]]) #[[ATTR3]] -// CHECK12-NEXT: br label [[OMP_OFFLOAD_CONT31]] -// CHECK12: omp_offload.cont31: -// CHECK12-NEXT: [[TMP104:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 -// CHECK12-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP104]]) +// CHECK12-NEXT: [[TMP83:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP84:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP85:%.*]] = load i32, i32* [[N]], align 4 +// CHECK12-NEXT: store i32 [[TMP85]], i32* [[DOTCAPTURE_EXPR_22]], align 4 +// CHECK12-NEXT: [[TMP86:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_22]], align 4 +// CHECK12-NEXT: [[SUB24:%.*]] = sub nsw i32 [[TMP86]], 0 +// CHECK12-NEXT: [[DIV25:%.*]] = sdiv i32 [[SUB24]], 1 +// CHECK12-NEXT: [[SUB26:%.*]] = sub nsw i32 [[DIV25]], 1 +// CHECK12-NEXT: store i32 [[SUB26]], i32* [[DOTCAPTURE_EXPR_23]], align 4 +// CHECK12-NEXT: [[TMP87:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_23]], align 4 +// CHECK12-NEXT: [[ADD27:%.*]] = add nsw i32 [[TMP87]], 1 +// CHECK12-NEXT: [[TMP88:%.*]] = zext i32 [[ADD27]] to i64 +// CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP88]]) +// CHECK12-NEXT: [[TMP89:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l108.region_id, i32 4, i8** [[TMP83]], i8** [[TMP84]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK12-NEXT: [[TMP90:%.*]] = icmp ne i32 [[TMP89]], 0 +// CHECK12-NEXT: br i1 [[TMP90]], label [[OMP_OFFLOAD_FAILED28:%.*]], label [[OMP_OFFLOAD_CONT29:%.*]] +// CHECK12: omp_offload.failed28: +// CHECK12-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l108(i32 [[TMP58]], i32 [[TMP0]], i32* [[VLA]], i32 [[TMP60]]) #[[ATTR3]] +// CHECK12-NEXT: br label [[OMP_OFFLOAD_CONT29]] +// CHECK12: omp_offload.cont29: +// CHECK12-NEXT: [[TMP91:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 +// CHECK12-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP91]]) // CHECK12-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK12-NEXT: [[TMP105:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK12-NEXT: call void @llvm.stackrestore(i8* [[TMP105]]) -// CHECK12-NEXT: [[TMP106:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK12-NEXT: ret i32 [[TMP106]] +// CHECK12-NEXT: [[TMP92:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK12-NEXT: call void @llvm.stackrestore(i8* [[TMP92]]) +// CHECK12-NEXT: [[TMP93:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK12-NEXT: ret i32 [[TMP93]] // // // CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l100 @@ -8677,11 +8585,11 @@ // CHECK12-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK12-NEXT: store i32 
[[TMP4]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK12-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP5]]) +// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP5]]) // CHECK12-NEXT: ret void // // -// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..5 +// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..6 // CHECK12-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK12-NEXT: entry: // CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -8762,7 +8670,7 @@ // CHECK12-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4, !llvm.access.group !25 // CHECK12-NEXT: store i32 [[TMP20]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !25 // CHECK12-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !25 -// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*, i32)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32 [[TMP16]], i32 [[TMP17]], i32 [[TMP19]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP21]]), !llvm.access.group !25 +// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*, i32)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i32 [[TMP16]], i32 [[TMP17]], i32 [[TMP19]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP21]]), !llvm.access.group !25 // CHECK12-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK12: omp.inner.for.inc: // CHECK12-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25 @@ -8816,7 +8724,7 @@ // CHECK12-NEXT: ret void // // -// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..6 +// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..7 // CHECK12-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32 noundef [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK12-NEXT: entry: // CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -8964,7 +8872,7 @@ // CHECK12-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK12-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK12-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l81.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK12-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l81.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK12-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK12-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK12: omp_offload.failed: @@ -8982,7 +8890,7 @@ // CHECK12-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0 // CHECK12-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0 // CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK12-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l85.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.13, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.14, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK12-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l85.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x 
i64]* @.offload_sizes.16, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK12-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 // CHECK12-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]] // CHECK12: omp_offload.failed5: @@ -9013,7 +8921,7 @@ // CHECK12-NEXT: [[TMP31:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0 // CHECK12-NEXT: [[TMP32:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0 // CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK12-NEXT: [[TMP33:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l89.region_id, i32 2, i8** [[TMP31]], i8** [[TMP32]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.17, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.18, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK12-NEXT: [[TMP33:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l89.region_id, i32 2, i8** [[TMP31]], i8** [[TMP32]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.20, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.21, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK12-NEXT: [[TMP34:%.*]] = icmp ne i32 [[TMP33]], 0 // CHECK12-NEXT: br i1 [[TMP34]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]] // CHECK12: omp_offload.failed11: @@ -9029,11 +8937,11 @@ // CHECK12-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK12-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 // CHECK12-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 -// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..8 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK12-NEXT: ret void // // -// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..8 +// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..10 // CHECK12-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK12-NEXT: entry: // CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -9079,7 +8987,7 @@ // CHECK12: omp.inner.for.body: // CHECK12-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !31 // CHECK12-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !31 -// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]), !llvm.access.group !31 +// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]), !llvm.access.group !31 // CHECK12-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK12: omp.inner.for.inc: // CHECK12-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !31 @@ -9101,7 +9009,7 @@ // CHECK12-NEXT: ret void // // -// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..9 +// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..11 // CHECK12-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK12-NEXT: entry: // CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -9188,11 +9096,11 @@ // CHECK12-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK12-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 // CHECK12-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 -// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK12-NEXT: ret void // // -// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..14 // CHECK12-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK12-NEXT: entry: // CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -9238,7 +9146,7 @@ // CHECK12: omp.inner.for.body: // CHECK12-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !37 // CHECK12-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !37 -// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..12 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]), !llvm.access.group !37 +// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]), !llvm.access.group !37 // CHECK12-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK12: omp.inner.for.inc: // CHECK12-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !37 @@ -9260,7 +9168,7 @@ // CHECK12-NEXT: ret void // // -// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..12 +// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..15 // CHECK12-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK12-NEXT: entry: // CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -9353,11 +9261,11 @@ // CHECK12-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK12-NEXT: store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK12-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..15 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP2]]) +// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..18 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP2]]) // CHECK12-NEXT: ret void // // -// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..15 +// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..18 // CHECK12-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK12-NEXT: entry: // CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -9409,7 +9317,7 @@ // CHECK12-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4, !llvm.access.group !43 // CHECK12-NEXT: store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !43 // CHECK12-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !43 -// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]]), !llvm.access.group !43 +// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..19 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]]), !llvm.access.group !43 // CHECK12-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK12: omp.inner.for.inc: // CHECK12-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !43 @@ -9452,7 +9360,7 @@ // CHECK12-NEXT: ret void // // -// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..16 +// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..19 // CHECK12-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK12-NEXT: entry: // CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 diff --git a/clang/test/OpenMP/target_teams_distribute_parallel_for_simd_schedule_codegen.cpp b/clang/test/OpenMP/target_teams_distribute_parallel_for_simd_schedule_codegen.cpp --- a/clang/test/OpenMP/target_teams_distribute_parallel_for_simd_schedule_codegen.cpp +++ b/clang/test/OpenMP/target_teams_distribute_parallel_for_simd_schedule_codegen.cpp @@ -8742,7 +8742,6 @@ // CHECK13-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK13-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK13-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8 -// CHECK13-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 8 // CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -8750,38 +8749,34 @@ // CHECK13-NEXT: [[DOTOFFLOAD_BASEPTRS5:%.*]] = alloca [3 x i8*], align 8 // CHECK13-NEXT: [[DOTOFFLOAD_PTRS6:%.*]] = alloca [3 x i8*], align 8 // CHECK13-NEXT: [[DOTOFFLOAD_MAPPERS7:%.*]] = alloca [3 x i8*], align 8 -// CHECK13-NEXT: [[DOTOFFLOAD_SIZES8:%.*]] = alloca [3 x i64], align 8 -// CHECK13-NEXT: [[_TMP9:%.*]] = alloca i32, align 4 +// CHECK13-NEXT: [[_TMP8:%.*]] = alloca i32, align 4 +// CHECK13-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4 -// CHECK13-NEXT: [[DOTCAPTURE_EXPR_11:%.*]] = alloca i32, align 4 -// CHECK13-NEXT: [[DOTCAPTURE_EXPR_18:%.*]] = alloca i32, align 4 -// CHECK13-NEXT: [[N_CASTED19:%.*]] = alloca i64, align 8 +// CHECK13-NEXT: [[DOTCAPTURE_EXPR_17:%.*]] = alloca i32, align 4 +// CHECK13-NEXT: [[N_CASTED18:%.*]] = alloca i64, align 8 // CHECK13-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8 -// CHECK13-NEXT: [[DOTOFFLOAD_BASEPTRS22:%.*]] = alloca [4 x i8*], align 8 -// CHECK13-NEXT: [[DOTOFFLOAD_PTRS23:%.*]] = alloca [4 x i8*], align 8 -// CHECK13-NEXT: [[DOTOFFLOAD_MAPPERS24:%.*]] = alloca [4 x i8*], align 8 -// CHECK13-NEXT: [[DOTOFFLOAD_SIZES25:%.*]] = alloca [4 x i64], align 8 -// CHECK13-NEXT: [[_TMP26:%.*]] = alloca i32, align 4 -// CHECK13-NEXT: [[DOTCAPTURE_EXPR_27:%.*]] = alloca i32, align 4 -// CHECK13-NEXT: [[DOTCAPTURE_EXPR_28:%.*]] = alloca i32, align 4 -// CHECK13-NEXT: [[N_CASTED35:%.*]] = alloca i64, align 8 -// CHECK13-NEXT: [[DOTOFFLOAD_BASEPTRS37:%.*]] = alloca [3 x i8*], align 8 -// CHECK13-NEXT: [[DOTOFFLOAD_PTRS38:%.*]] = alloca [3 x i8*], align 8 -// CHECK13-NEXT: [[DOTOFFLOAD_MAPPERS39:%.*]] = alloca [3 x i8*], align 8 -// 
CHECK13-NEXT: [[DOTOFFLOAD_SIZES40:%.*]] = alloca [3 x i64], align 8 -// CHECK13-NEXT: [[_TMP41:%.*]] = alloca i32, align 4 -// CHECK13-NEXT: [[DOTCAPTURE_EXPR_42:%.*]] = alloca i32, align 4 -// CHECK13-NEXT: [[DOTCAPTURE_EXPR_43:%.*]] = alloca i32, align 4 -// CHECK13-NEXT: [[DOTCAPTURE_EXPR_50:%.*]] = alloca i32, align 4 -// CHECK13-NEXT: [[N_CASTED51:%.*]] = alloca i64, align 8 -// CHECK13-NEXT: [[DOTCAPTURE_EXPR__CASTED53:%.*]] = alloca i64, align 8 -// CHECK13-NEXT: [[DOTOFFLOAD_BASEPTRS55:%.*]] = alloca [4 x i8*], align 8 -// CHECK13-NEXT: [[DOTOFFLOAD_PTRS56:%.*]] = alloca [4 x i8*], align 8 -// CHECK13-NEXT: [[DOTOFFLOAD_MAPPERS57:%.*]] = alloca [4 x i8*], align 8 -// CHECK13-NEXT: [[DOTOFFLOAD_SIZES58:%.*]] = alloca [4 x i64], align 8 -// CHECK13-NEXT: [[_TMP59:%.*]] = alloca i32, align 4 -// CHECK13-NEXT: [[DOTCAPTURE_EXPR_60:%.*]] = alloca i32, align 4 -// CHECK13-NEXT: [[DOTCAPTURE_EXPR_61:%.*]] = alloca i32, align 4 +// CHECK13-NEXT: [[DOTOFFLOAD_BASEPTRS21:%.*]] = alloca [4 x i8*], align 8 +// CHECK13-NEXT: [[DOTOFFLOAD_PTRS22:%.*]] = alloca [4 x i8*], align 8 +// CHECK13-NEXT: [[DOTOFFLOAD_MAPPERS23:%.*]] = alloca [4 x i8*], align 8 +// CHECK13-NEXT: [[_TMP24:%.*]] = alloca i32, align 4 +// CHECK13-NEXT: [[DOTCAPTURE_EXPR_25:%.*]] = alloca i32, align 4 +// CHECK13-NEXT: [[DOTCAPTURE_EXPR_26:%.*]] = alloca i32, align 4 +// CHECK13-NEXT: [[N_CASTED33:%.*]] = alloca i64, align 8 +// CHECK13-NEXT: [[DOTOFFLOAD_BASEPTRS35:%.*]] = alloca [3 x i8*], align 8 +// CHECK13-NEXT: [[DOTOFFLOAD_PTRS36:%.*]] = alloca [3 x i8*], align 8 +// CHECK13-NEXT: [[DOTOFFLOAD_MAPPERS37:%.*]] = alloca [3 x i8*], align 8 +// CHECK13-NEXT: [[_TMP38:%.*]] = alloca i32, align 4 +// CHECK13-NEXT: [[DOTCAPTURE_EXPR_39:%.*]] = alloca i32, align 4 +// CHECK13-NEXT: [[DOTCAPTURE_EXPR_40:%.*]] = alloca i32, align 4 +// CHECK13-NEXT: [[DOTCAPTURE_EXPR_47:%.*]] = alloca i32, align 4 +// CHECK13-NEXT: [[N_CASTED48:%.*]] = alloca i64, align 8 +// CHECK13-NEXT: [[DOTCAPTURE_EXPR__CASTED50:%.*]] = alloca i64, align 8 +// CHECK13-NEXT: [[DOTOFFLOAD_BASEPTRS52:%.*]] = alloca [4 x i8*], align 8 +// CHECK13-NEXT: [[DOTOFFLOAD_PTRS53:%.*]] = alloca [4 x i8*], align 8 +// CHECK13-NEXT: [[DOTOFFLOAD_MAPPERS54:%.*]] = alloca [4 x i8*], align 8 +// CHECK13-NEXT: [[_TMP55:%.*]] = alloca i32, align 4 +// CHECK13-NEXT: [[DOTCAPTURE_EXPR_56:%.*]] = alloca i32, align 4 +// CHECK13-NEXT: [[DOTCAPTURE_EXPR_57:%.*]] = alloca i32, align 4 // CHECK13-NEXT: store i32 0, i32* [[RETVAL]], align 4 // CHECK13-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 // CHECK13-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 8 @@ -8804,314 +8799,280 @@ // CHECK13-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK13-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i64* // CHECK13-NEXT: store i64 [[TMP4]], i64* [[TMP9]], align 8 -// CHECK13-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK13-NEXT: store i64 4, i64* [[TMP10]], align 8 -// CHECK13-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK13-NEXT: store i8* null, i8** [[TMP11]], align 8 -// CHECK13-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK13-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64* -// CHECK13-NEXT: store i64 [[TMP1]], i64* [[TMP13]], align 8 -// CHECK13-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* 
[[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK13-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64* -// CHECK13-NEXT: store i64 [[TMP1]], i64* [[TMP15]], align 8 -// CHECK13-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK13-NEXT: store i64 8, i64* [[TMP16]], align 8 -// CHECK13-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK13-NEXT: store i8* null, i8** [[TMP17]], align 8 -// CHECK13-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK13-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK13-NEXT: store i8* null, i8** [[TMP10]], align 8 +// CHECK13-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK13-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i64* +// CHECK13-NEXT: store i64 [[TMP1]], i64* [[TMP12]], align 8 +// CHECK13-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK13-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i64* +// CHECK13-NEXT: store i64 [[TMP1]], i64* [[TMP14]], align 8 +// CHECK13-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK13-NEXT: store i8* null, i8** [[TMP15]], align 8 +// CHECK13-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK13-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** +// CHECK13-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 8 +// CHECK13-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK13-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** // CHECK13-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 8 -// CHECK13-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK13-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** -// CHECK13-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 8 -// CHECK13-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK13-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 8 -// CHECK13-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK13-NEXT: store i8* null, i8** [[TMP23]], align 8 -// CHECK13-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4 -// CHECK13-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK13-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK13-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 +// CHECK13-NEXT: store i64 [[TMP5]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 2), align 8 +// CHECK13-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK13-NEXT: store i8* null, i8** [[TMP20]], align 8 +// CHECK13-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* 
[[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK13-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK13-NEXT: [[TMP23:%.*]] = load i32, i32* [[N]], align 4 +// CHECK13-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK13-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK13-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK13-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK13-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK13-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK13-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1 -// CHECK13-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 -// CHECK13-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP30]]) -// CHECK13-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK13-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 -// CHECK13-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK13-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], 1 +// CHECK13-NEXT: [[TMP26:%.*]] = zext i32 [[ADD]] to i64 +// CHECK13-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP26]]) +// CHECK13-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK13-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK13-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK13: omp_offload.failed: // CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139(i64 [[TMP4]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK13-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK13: omp_offload.cont: -// CHECK13-NEXT: [[TMP33:%.*]] = load i32, i32* [[N]], align 4 +// CHECK13-NEXT: [[TMP29:%.*]] = load i32, i32* [[N]], align 4 // CHECK13-NEXT: [[CONV4:%.*]] = bitcast i64* [[N_CASTED3]] to i32* -// CHECK13-NEXT: store i32 [[TMP33]], i32* [[CONV4]], align 4 -// CHECK13-NEXT: [[TMP34:%.*]] = load i64, i64* [[N_CASTED3]], align 8 -// CHECK13-NEXT: [[TMP35:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK13-NEXT: [[TMP36:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i64* -// CHECK13-NEXT: store i64 [[TMP34]], i64* [[TMP37]], align 8 -// CHECK13-NEXT: [[TMP38:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i64* -// CHECK13-NEXT: store i64 [[TMP34]], i64* [[TMP39]], align 8 -// CHECK13-NEXT: [[TMP40:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* 
[[DOTOFFLOAD_SIZES8]], i32 0, i32 0 -// CHECK13-NEXT: store i64 4, i64* [[TMP40]], align 8 -// CHECK13-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 0 +// CHECK13-NEXT: store i32 [[TMP29]], i32* [[CONV4]], align 4 +// CHECK13-NEXT: [[TMP30:%.*]] = load i64, i64* [[N_CASTED3]], align 8 +// CHECK13-NEXT: [[TMP31:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK13-NEXT: [[TMP32:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 +// CHECK13-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i64* +// CHECK13-NEXT: store i64 [[TMP30]], i64* [[TMP33]], align 8 +// CHECK13-NEXT: [[TMP34:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 +// CHECK13-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i64* +// CHECK13-NEXT: store i64 [[TMP30]], i64* [[TMP35]], align 8 +// CHECK13-NEXT: [[TMP36:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 0 +// CHECK13-NEXT: store i8* null, i8** [[TMP36]], align 8 +// CHECK13-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1 +// CHECK13-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i64* +// CHECK13-NEXT: store i64 [[TMP1]], i64* [[TMP38]], align 8 +// CHECK13-NEXT: [[TMP39:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 1 +// CHECK13-NEXT: [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i64* +// CHECK13-NEXT: store i64 [[TMP1]], i64* [[TMP40]], align 8 +// CHECK13-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 1 // CHECK13-NEXT: store i8* null, i8** [[TMP41]], align 8 -// CHECK13-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1 -// CHECK13-NEXT: [[TMP43:%.*]] = bitcast i8** [[TMP42]] to i64* -// CHECK13-NEXT: store i64 [[TMP1]], i64* [[TMP43]], align 8 -// CHECK13-NEXT: [[TMP44:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 1 -// CHECK13-NEXT: [[TMP45:%.*]] = bitcast i8** [[TMP44]] to i64* -// CHECK13-NEXT: store i64 [[TMP1]], i64* [[TMP45]], align 8 -// CHECK13-NEXT: [[TMP46:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 1 -// CHECK13-NEXT: store i64 8, i64* [[TMP46]], align 8 -// CHECK13-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 1 -// CHECK13-NEXT: store i8* null, i8** [[TMP47]], align 8 -// CHECK13-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 2 -// CHECK13-NEXT: [[TMP49:%.*]] = bitcast i8** [[TMP48]] to i32** -// CHECK13-NEXT: store i32* [[VLA]], i32** [[TMP49]], align 8 -// CHECK13-NEXT: [[TMP50:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 2 -// CHECK13-NEXT: [[TMP51:%.*]] = bitcast i8** [[TMP50]] to i32** -// CHECK13-NEXT: store i32* [[VLA]], i32** [[TMP51]], align 8 -// CHECK13-NEXT: [[TMP52:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 2 -// CHECK13-NEXT: store i64 [[TMP35]], i64* [[TMP52]], align 8 -// CHECK13-NEXT: [[TMP53:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 2 -// CHECK13-NEXT: store i8* null, i8** [[TMP53]], align 8 -// CHECK13-NEXT: [[TMP54:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP55:%.*]] = 
getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP56:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP57:%.*]] = load i32, i32* [[N]], align 4 -// CHECK13-NEXT: store i32 [[TMP57]], i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK13-NEXT: [[TMP58:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK13-NEXT: [[SUB12:%.*]] = sub nsw i32 [[TMP58]], 0 -// CHECK13-NEXT: [[DIV13:%.*]] = sdiv i32 [[SUB12]], 1 -// CHECK13-NEXT: [[SUB14:%.*]] = sub nsw i32 [[DIV13]], 1 -// CHECK13-NEXT: store i32 [[SUB14]], i32* [[DOTCAPTURE_EXPR_11]], align 4 -// CHECK13-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_11]], align 4 -// CHECK13-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP59]], 1 -// CHECK13-NEXT: [[TMP60:%.*]] = zext i32 [[ADD15]] to i64 -// CHECK13-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP60]]) -// CHECK13-NEXT: [[TMP61:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143.region_id, i32 3, i8** [[TMP54]], i8** [[TMP55]], i64* [[TMP56]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK13-NEXT: [[TMP62:%.*]] = icmp ne i32 [[TMP61]], 0 -// CHECK13-NEXT: br i1 [[TMP62]], label [[OMP_OFFLOAD_FAILED16:%.*]], label [[OMP_OFFLOAD_CONT17:%.*]] -// CHECK13: omp_offload.failed16: -// CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143(i64 [[TMP34]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] -// CHECK13-NEXT: br label [[OMP_OFFLOAD_CONT17]] -// CHECK13: omp_offload.cont17: -// CHECK13-NEXT: [[TMP63:%.*]] = load i32, i32* [[M]], align 4 -// CHECK13-NEXT: store i32 [[TMP63]], i32* [[DOTCAPTURE_EXPR_18]], align 4 -// CHECK13-NEXT: [[TMP64:%.*]] = load i32, i32* [[N]], align 4 -// CHECK13-NEXT: [[CONV20:%.*]] = bitcast i64* [[N_CASTED19]] to i32* -// CHECK13-NEXT: store i32 [[TMP64]], i32* [[CONV20]], align 4 -// CHECK13-NEXT: [[TMP65:%.*]] = load i64, i64* [[N_CASTED19]], align 8 -// CHECK13-NEXT: [[TMP66:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_18]], align 4 -// CHECK13-NEXT: [[CONV21:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* -// CHECK13-NEXT: store i32 [[TMP66]], i32* [[CONV21]], align 4 -// CHECK13-NEXT: [[TMP67:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK13-NEXT: [[TMP68:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK13-NEXT: [[TMP69:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP70:%.*]] = bitcast i8** [[TMP69]] to i64* -// CHECK13-NEXT: store i64 [[TMP65]], i64* [[TMP70]], align 8 -// CHECK13-NEXT: [[TMP71:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP72:%.*]] = bitcast i8** [[TMP71]] to i64* -// CHECK13-NEXT: store i64 [[TMP65]], i64* [[TMP72]], align 8 -// CHECK13-NEXT: [[TMP73:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 0 -// CHECK13-NEXT: store i64 4, i64* [[TMP73]], align 8 -// CHECK13-NEXT: [[TMP74:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 0 -// CHECK13-NEXT: store i8* null, i8** [[TMP74]], align 8 -// CHECK13-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 1 -// CHECK13-NEXT: [[TMP76:%.*]] = bitcast i8** [[TMP75]] to 
i64* -// CHECK13-NEXT: store i64 [[TMP1]], i64* [[TMP76]], align 8 -// CHECK13-NEXT: [[TMP77:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 1 -// CHECK13-NEXT: [[TMP78:%.*]] = bitcast i8** [[TMP77]] to i64* -// CHECK13-NEXT: store i64 [[TMP1]], i64* [[TMP78]], align 8 -// CHECK13-NEXT: [[TMP79:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 1 -// CHECK13-NEXT: store i64 8, i64* [[TMP79]], align 8 -// CHECK13-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 1 +// CHECK13-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 2 +// CHECK13-NEXT: [[TMP43:%.*]] = bitcast i8** [[TMP42]] to i32** +// CHECK13-NEXT: store i32* [[VLA]], i32** [[TMP43]], align 8 +// CHECK13-NEXT: [[TMP44:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 2 +// CHECK13-NEXT: [[TMP45:%.*]] = bitcast i8** [[TMP44]] to i32** +// CHECK13-NEXT: store i32* [[VLA]], i32** [[TMP45]], align 8 +// CHECK13-NEXT: store i64 [[TMP31]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 2), align 8 +// CHECK13-NEXT: [[TMP46:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 2 +// CHECK13-NEXT: store i8* null, i8** [[TMP46]], align 8 +// CHECK13-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 +// CHECK13-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 +// CHECK13-NEXT: [[TMP49:%.*]] = load i32, i32* [[N]], align 4 +// CHECK13-NEXT: store i32 [[TMP49]], i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK13-NEXT: [[TMP50:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK13-NEXT: [[SUB11:%.*]] = sub nsw i32 [[TMP50]], 0 +// CHECK13-NEXT: [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1 +// CHECK13-NEXT: [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1 +// CHECK13-NEXT: store i32 [[SUB13]], i32* [[DOTCAPTURE_EXPR_10]], align 4 +// CHECK13-NEXT: [[TMP51:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 +// CHECK13-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP51]], 1 +// CHECK13-NEXT: [[TMP52:%.*]] = zext i32 [[ADD14]] to i64 +// CHECK13-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP52]]) +// CHECK13-NEXT: [[TMP53:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143.region_id, i32 3, i8** [[TMP47]], i8** [[TMP48]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK13-NEXT: [[TMP54:%.*]] = icmp ne i32 [[TMP53]], 0 +// CHECK13-NEXT: br i1 [[TMP54]], label [[OMP_OFFLOAD_FAILED15:%.*]], label [[OMP_OFFLOAD_CONT16:%.*]] +// CHECK13: omp_offload.failed15: +// CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143(i64 [[TMP30]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] +// CHECK13-NEXT: br label [[OMP_OFFLOAD_CONT16]] +// CHECK13: omp_offload.cont16: +// CHECK13-NEXT: [[TMP55:%.*]] = load i32, i32* [[M]], align 4 +// CHECK13-NEXT: store i32 [[TMP55]], i32* [[DOTCAPTURE_EXPR_17]], align 4 +// CHECK13-NEXT: [[TMP56:%.*]] = load i32, i32* [[N]], align 4 +// CHECK13-NEXT: [[CONV19:%.*]] = bitcast i64* [[N_CASTED18]] to i32* +// CHECK13-NEXT: store i32 
[[TMP56]], i32* [[CONV19]], align 4 +// CHECK13-NEXT: [[TMP57:%.*]] = load i64, i64* [[N_CASTED18]], align 8 +// CHECK13-NEXT: [[TMP58:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_17]], align 4 +// CHECK13-NEXT: [[CONV20:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* +// CHECK13-NEXT: store i32 [[TMP58]], i32* [[CONV20]], align 4 +// CHECK13-NEXT: [[TMP59:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 +// CHECK13-NEXT: [[TMP60:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK13-NEXT: [[TMP61:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0 +// CHECK13-NEXT: [[TMP62:%.*]] = bitcast i8** [[TMP61]] to i64* +// CHECK13-NEXT: store i64 [[TMP57]], i64* [[TMP62]], align 8 +// CHECK13-NEXT: [[TMP63:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0 +// CHECK13-NEXT: [[TMP64:%.*]] = bitcast i8** [[TMP63]] to i64* +// CHECK13-NEXT: store i64 [[TMP57]], i64* [[TMP64]], align 8 +// CHECK13-NEXT: [[TMP65:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 0 +// CHECK13-NEXT: store i8* null, i8** [[TMP65]], align 8 +// CHECK13-NEXT: [[TMP66:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 1 +// CHECK13-NEXT: [[TMP67:%.*]] = bitcast i8** [[TMP66]] to i64* +// CHECK13-NEXT: store i64 [[TMP1]], i64* [[TMP67]], align 8 +// CHECK13-NEXT: [[TMP68:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 1 +// CHECK13-NEXT: [[TMP69:%.*]] = bitcast i8** [[TMP68]] to i64* +// CHECK13-NEXT: store i64 [[TMP1]], i64* [[TMP69]], align 8 +// CHECK13-NEXT: [[TMP70:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 1 +// CHECK13-NEXT: store i8* null, i8** [[TMP70]], align 8 +// CHECK13-NEXT: [[TMP71:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 2 +// CHECK13-NEXT: [[TMP72:%.*]] = bitcast i8** [[TMP71]] to i32** +// CHECK13-NEXT: store i32* [[VLA]], i32** [[TMP72]], align 8 +// CHECK13-NEXT: [[TMP73:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 2 +// CHECK13-NEXT: [[TMP74:%.*]] = bitcast i8** [[TMP73]] to i32** +// CHECK13-NEXT: store i32* [[VLA]], i32** [[TMP74]], align 8 +// CHECK13-NEXT: store i64 [[TMP60]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 2), align 8 +// CHECK13-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 2 +// CHECK13-NEXT: store i8* null, i8** [[TMP75]], align 8 +// CHECK13-NEXT: [[TMP76:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 3 +// CHECK13-NEXT: [[TMP77:%.*]] = bitcast i8** [[TMP76]] to i64* +// CHECK13-NEXT: store i64 [[TMP59]], i64* [[TMP77]], align 8 +// CHECK13-NEXT: [[TMP78:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 3 +// CHECK13-NEXT: [[TMP79:%.*]] = bitcast i8** [[TMP78]] to i64* +// CHECK13-NEXT: store i64 [[TMP59]], i64* [[TMP79]], align 8 +// CHECK13-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 3 // CHECK13-NEXT: store i8* null, i8** [[TMP80]], align 8 -// CHECK13-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 2 -// CHECK13-NEXT: [[TMP82:%.*]] = bitcast i8** [[TMP81]] to i32** -// CHECK13-NEXT: store i32* [[VLA]], i32** [[TMP82]], align 8 -// CHECK13-NEXT: 
[[TMP83:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 2 -// CHECK13-NEXT: [[TMP84:%.*]] = bitcast i8** [[TMP83]] to i32** -// CHECK13-NEXT: store i32* [[VLA]], i32** [[TMP84]], align 8 -// CHECK13-NEXT: [[TMP85:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 2 -// CHECK13-NEXT: store i64 [[TMP68]], i64* [[TMP85]], align 8 -// CHECK13-NEXT: [[TMP86:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 2 -// CHECK13-NEXT: store i8* null, i8** [[TMP86]], align 8 -// CHECK13-NEXT: [[TMP87:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 3 -// CHECK13-NEXT: [[TMP88:%.*]] = bitcast i8** [[TMP87]] to i64* -// CHECK13-NEXT: store i64 [[TMP67]], i64* [[TMP88]], align 8 -// CHECK13-NEXT: [[TMP89:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 3 -// CHECK13-NEXT: [[TMP90:%.*]] = bitcast i8** [[TMP89]] to i64* -// CHECK13-NEXT: store i64 [[TMP67]], i64* [[TMP90]], align 8 -// CHECK13-NEXT: [[TMP91:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 3 -// CHECK13-NEXT: store i64 4, i64* [[TMP91]], align 8 -// CHECK13-NEXT: [[TMP92:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 3 -// CHECK13-NEXT: store i8* null, i8** [[TMP92]], align 8 -// CHECK13-NEXT: [[TMP93:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP94:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP95:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP96:%.*]] = load i32, i32* [[N]], align 4 -// CHECK13-NEXT: store i32 [[TMP96]], i32* [[DOTCAPTURE_EXPR_27]], align 4 -// CHECK13-NEXT: [[TMP97:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_27]], align 4 -// CHECK13-NEXT: [[SUB29:%.*]] = sub nsw i32 [[TMP97]], 0 -// CHECK13-NEXT: [[DIV30:%.*]] = sdiv i32 [[SUB29]], 1 -// CHECK13-NEXT: [[SUB31:%.*]] = sub nsw i32 [[DIV30]], 1 -// CHECK13-NEXT: store i32 [[SUB31]], i32* [[DOTCAPTURE_EXPR_28]], align 4 -// CHECK13-NEXT: [[TMP98:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_28]], align 4 -// CHECK13-NEXT: [[ADD32:%.*]] = add nsw i32 [[TMP98]], 1 -// CHECK13-NEXT: [[TMP99:%.*]] = zext i32 [[ADD32]] to i64 -// CHECK13-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP99]]) -// CHECK13-NEXT: [[TMP100:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147.region_id, i32 4, i8** [[TMP93]], i8** [[TMP94]], i64* [[TMP95]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.7, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK13-NEXT: [[TMP101:%.*]] = icmp ne i32 [[TMP100]], 0 -// CHECK13-NEXT: br i1 [[TMP101]], label [[OMP_OFFLOAD_FAILED33:%.*]], label [[OMP_OFFLOAD_CONT34:%.*]] -// CHECK13: omp_offload.failed33: -// CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147(i64 [[TMP65]], i64 [[TMP1]], i32* [[VLA]], i64 [[TMP67]]) #[[ATTR3]] -// CHECK13-NEXT: br label [[OMP_OFFLOAD_CONT34]] -// CHECK13: omp_offload.cont34: -// CHECK13-NEXT: [[TMP102:%.*]] = load i32, i32* [[N]], align 4 -// CHECK13-NEXT: [[CONV36:%.*]] = bitcast i64* [[N_CASTED35]] to i32* -// CHECK13-NEXT: store i32 [[TMP102]], i32* [[CONV36]], align 4 -// 
CHECK13-NEXT: [[TMP103:%.*]] = load i64, i64* [[N_CASTED35]], align 8 -// CHECK13-NEXT: [[TMP104:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK13-NEXT: [[TMP105:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS37]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP106:%.*]] = bitcast i8** [[TMP105]] to i64* -// CHECK13-NEXT: store i64 [[TMP103]], i64* [[TMP106]], align 8 -// CHECK13-NEXT: [[TMP107:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS38]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP108:%.*]] = bitcast i8** [[TMP107]] to i64* -// CHECK13-NEXT: store i64 [[TMP103]], i64* [[TMP108]], align 8 -// CHECK13-NEXT: [[TMP109:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES40]], i32 0, i32 0 -// CHECK13-NEXT: store i64 4, i64* [[TMP109]], align 8 -// CHECK13-NEXT: [[TMP110:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS39]], i64 0, i64 0 -// CHECK13-NEXT: store i8* null, i8** [[TMP110]], align 8 -// CHECK13-NEXT: [[TMP111:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS37]], i32 0, i32 1 -// CHECK13-NEXT: [[TMP112:%.*]] = bitcast i8** [[TMP111]] to i64* -// CHECK13-NEXT: store i64 [[TMP1]], i64* [[TMP112]], align 8 -// CHECK13-NEXT: [[TMP113:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS38]], i32 0, i32 1 -// CHECK13-NEXT: [[TMP114:%.*]] = bitcast i8** [[TMP113]] to i64* -// CHECK13-NEXT: store i64 [[TMP1]], i64* [[TMP114]], align 8 -// CHECK13-NEXT: [[TMP115:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES40]], i32 0, i32 1 -// CHECK13-NEXT: store i64 8, i64* [[TMP115]], align 8 -// CHECK13-NEXT: [[TMP116:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS39]], i64 0, i64 1 -// CHECK13-NEXT: store i8* null, i8** [[TMP116]], align 8 -// CHECK13-NEXT: [[TMP117:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS37]], i32 0, i32 2 -// CHECK13-NEXT: [[TMP118:%.*]] = bitcast i8** [[TMP117]] to i32** -// CHECK13-NEXT: store i32* [[VLA]], i32** [[TMP118]], align 8 -// CHECK13-NEXT: [[TMP119:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS38]], i32 0, i32 2 -// CHECK13-NEXT: [[TMP120:%.*]] = bitcast i8** [[TMP119]] to i32** -// CHECK13-NEXT: store i32* [[VLA]], i32** [[TMP120]], align 8 -// CHECK13-NEXT: [[TMP121:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES40]], i32 0, i32 2 -// CHECK13-NEXT: store i64 [[TMP104]], i64* [[TMP121]], align 8 -// CHECK13-NEXT: [[TMP122:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS39]], i64 0, i64 2 -// CHECK13-NEXT: store i8* null, i8** [[TMP122]], align 8 -// CHECK13-NEXT: [[TMP123:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS37]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP124:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS38]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP125:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES40]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP126:%.*]] = load i32, i32* [[N]], align 4 -// CHECK13-NEXT: store i32 [[TMP126]], i32* [[DOTCAPTURE_EXPR_42]], align 4 -// CHECK13-NEXT: [[TMP127:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_42]], align 4 -// CHECK13-NEXT: [[SUB44:%.*]] = sub nsw i32 [[TMP127]], 0 -// CHECK13-NEXT: [[DIV45:%.*]] = sdiv i32 [[SUB44]], 1 -// CHECK13-NEXT: [[SUB46:%.*]] = sub nsw i32 [[DIV45]], 1 -// CHECK13-NEXT: store i32 [[SUB46]], i32* [[DOTCAPTURE_EXPR_43]], align 4 -// CHECK13-NEXT: [[TMP128:%.*]] = load 
i32, i32* [[DOTCAPTURE_EXPR_43]], align 4 -// CHECK13-NEXT: [[ADD47:%.*]] = add nsw i32 [[TMP128]], 1 -// CHECK13-NEXT: [[TMP129:%.*]] = zext i32 [[ADD47]] to i64 -// CHECK13-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP129]]) -// CHECK13-NEXT: [[TMP130:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151.region_id, i32 3, i8** [[TMP123]], i8** [[TMP124]], i64* [[TMP125]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK13-NEXT: [[TMP131:%.*]] = icmp ne i32 [[TMP130]], 0 -// CHECK13-NEXT: br i1 [[TMP131]], label [[OMP_OFFLOAD_FAILED48:%.*]], label [[OMP_OFFLOAD_CONT49:%.*]] -// CHECK13: omp_offload.failed48: -// CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151(i64 [[TMP103]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] -// CHECK13-NEXT: br label [[OMP_OFFLOAD_CONT49]] -// CHECK13: omp_offload.cont49: -// CHECK13-NEXT: [[TMP132:%.*]] = load i32, i32* [[M]], align 4 -// CHECK13-NEXT: store i32 [[TMP132]], i32* [[DOTCAPTURE_EXPR_50]], align 4 -// CHECK13-NEXT: [[TMP133:%.*]] = load i32, i32* [[N]], align 4 -// CHECK13-NEXT: [[CONV52:%.*]] = bitcast i64* [[N_CASTED51]] to i32* -// CHECK13-NEXT: store i32 [[TMP133]], i32* [[CONV52]], align 4 -// CHECK13-NEXT: [[TMP134:%.*]] = load i64, i64* [[N_CASTED51]], align 8 -// CHECK13-NEXT: [[TMP135:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_50]], align 4 -// CHECK13-NEXT: [[CONV54:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED53]] to i32* -// CHECK13-NEXT: store i32 [[TMP135]], i32* [[CONV54]], align 4 -// CHECK13-NEXT: [[TMP136:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED53]], align 8 -// CHECK13-NEXT: [[TMP137:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK13-NEXT: [[TMP138:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS55]], i32 0, i32 0 +// CHECK13-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0 +// CHECK13-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0 +// CHECK13-NEXT: [[TMP83:%.*]] = load i32, i32* [[N]], align 4 +// CHECK13-NEXT: store i32 [[TMP83]], i32* [[DOTCAPTURE_EXPR_25]], align 4 +// CHECK13-NEXT: [[TMP84:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_25]], align 4 +// CHECK13-NEXT: [[SUB27:%.*]] = sub nsw i32 [[TMP84]], 0 +// CHECK13-NEXT: [[DIV28:%.*]] = sdiv i32 [[SUB27]], 1 +// CHECK13-NEXT: [[SUB29:%.*]] = sub nsw i32 [[DIV28]], 1 +// CHECK13-NEXT: store i32 [[SUB29]], i32* [[DOTCAPTURE_EXPR_26]], align 4 +// CHECK13-NEXT: [[TMP85:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_26]], align 4 +// CHECK13-NEXT: [[ADD30:%.*]] = add nsw i32 [[TMP85]], 1 +// CHECK13-NEXT: [[TMP86:%.*]] = zext i32 [[ADD30]] to i64 +// CHECK13-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP86]]) +// CHECK13-NEXT: [[TMP87:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147.region_id, i32 4, i8** [[TMP81]], i8** [[TMP82]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK13-NEXT: [[TMP88:%.*]] = icmp ne i32 [[TMP87]], 0 +// CHECK13-NEXT: br i1 [[TMP88]], label [[OMP_OFFLOAD_FAILED31:%.*]], 
label [[OMP_OFFLOAD_CONT32:%.*]] +// CHECK13: omp_offload.failed31: +// CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147(i64 [[TMP57]], i64 [[TMP1]], i32* [[VLA]], i64 [[TMP59]]) #[[ATTR3]] +// CHECK13-NEXT: br label [[OMP_OFFLOAD_CONT32]] +// CHECK13: omp_offload.cont32: +// CHECK13-NEXT: [[TMP89:%.*]] = load i32, i32* [[N]], align 4 +// CHECK13-NEXT: [[CONV34:%.*]] = bitcast i64* [[N_CASTED33]] to i32* +// CHECK13-NEXT: store i32 [[TMP89]], i32* [[CONV34]], align 4 +// CHECK13-NEXT: [[TMP90:%.*]] = load i64, i64* [[N_CASTED33]], align 8 +// CHECK13-NEXT: [[TMP91:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK13-NEXT: [[TMP92:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS35]], i32 0, i32 0 +// CHECK13-NEXT: [[TMP93:%.*]] = bitcast i8** [[TMP92]] to i64* +// CHECK13-NEXT: store i64 [[TMP90]], i64* [[TMP93]], align 8 +// CHECK13-NEXT: [[TMP94:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS36]], i32 0, i32 0 +// CHECK13-NEXT: [[TMP95:%.*]] = bitcast i8** [[TMP94]] to i64* +// CHECK13-NEXT: store i64 [[TMP90]], i64* [[TMP95]], align 8 +// CHECK13-NEXT: [[TMP96:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS37]], i64 0, i64 0 +// CHECK13-NEXT: store i8* null, i8** [[TMP96]], align 8 +// CHECK13-NEXT: [[TMP97:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS35]], i32 0, i32 1 +// CHECK13-NEXT: [[TMP98:%.*]] = bitcast i8** [[TMP97]] to i64* +// CHECK13-NEXT: store i64 [[TMP1]], i64* [[TMP98]], align 8 +// CHECK13-NEXT: [[TMP99:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS36]], i32 0, i32 1 +// CHECK13-NEXT: [[TMP100:%.*]] = bitcast i8** [[TMP99]] to i64* +// CHECK13-NEXT: store i64 [[TMP1]], i64* [[TMP100]], align 8 +// CHECK13-NEXT: [[TMP101:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS37]], i64 0, i64 1 +// CHECK13-NEXT: store i8* null, i8** [[TMP101]], align 8 +// CHECK13-NEXT: [[TMP102:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS35]], i32 0, i32 2 +// CHECK13-NEXT: [[TMP103:%.*]] = bitcast i8** [[TMP102]] to i32** +// CHECK13-NEXT: store i32* [[VLA]], i32** [[TMP103]], align 8 +// CHECK13-NEXT: [[TMP104:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS36]], i32 0, i32 2 +// CHECK13-NEXT: [[TMP105:%.*]] = bitcast i8** [[TMP104]] to i32** +// CHECK13-NEXT: store i32* [[VLA]], i32** [[TMP105]], align 8 +// CHECK13-NEXT: store i64 [[TMP91]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.12, i32 0, i32 2), align 8 +// CHECK13-NEXT: [[TMP106:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS37]], i64 0, i64 2 +// CHECK13-NEXT: store i8* null, i8** [[TMP106]], align 8 +// CHECK13-NEXT: [[TMP107:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS35]], i32 0, i32 0 +// CHECK13-NEXT: [[TMP108:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS36]], i32 0, i32 0 +// CHECK13-NEXT: [[TMP109:%.*]] = load i32, i32* [[N]], align 4 +// CHECK13-NEXT: store i32 [[TMP109]], i32* [[DOTCAPTURE_EXPR_39]], align 4 +// CHECK13-NEXT: [[TMP110:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_39]], align 4 +// CHECK13-NEXT: [[SUB41:%.*]] = sub nsw i32 [[TMP110]], 0 +// CHECK13-NEXT: [[DIV42:%.*]] = sdiv i32 [[SUB41]], 1 +// CHECK13-NEXT: [[SUB43:%.*]] = sub nsw i32 [[DIV42]], 1 +// CHECK13-NEXT: store i32 [[SUB43]], i32* [[DOTCAPTURE_EXPR_40]], align 4 +// CHECK13-NEXT: [[TMP111:%.*]] = load i32, i32* 
[[DOTCAPTURE_EXPR_40]], align 4 +// CHECK13-NEXT: [[ADD44:%.*]] = add nsw i32 [[TMP111]], 1 +// CHECK13-NEXT: [[TMP112:%.*]] = zext i32 [[ADD44]] to i64 +// CHECK13-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP112]]) +// CHECK13-NEXT: [[TMP113:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151.region_id, i32 3, i8** [[TMP107]], i8** [[TMP108]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK13-NEXT: [[TMP114:%.*]] = icmp ne i32 [[TMP113]], 0 +// CHECK13-NEXT: br i1 [[TMP114]], label [[OMP_OFFLOAD_FAILED45:%.*]], label [[OMP_OFFLOAD_CONT46:%.*]] +// CHECK13: omp_offload.failed45: +// CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151(i64 [[TMP90]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] +// CHECK13-NEXT: br label [[OMP_OFFLOAD_CONT46]] +// CHECK13: omp_offload.cont46: +// CHECK13-NEXT: [[TMP115:%.*]] = load i32, i32* [[M]], align 4 +// CHECK13-NEXT: store i32 [[TMP115]], i32* [[DOTCAPTURE_EXPR_47]], align 4 +// CHECK13-NEXT: [[TMP116:%.*]] = load i32, i32* [[N]], align 4 +// CHECK13-NEXT: [[CONV49:%.*]] = bitcast i64* [[N_CASTED48]] to i32* +// CHECK13-NEXT: store i32 [[TMP116]], i32* [[CONV49]], align 4 +// CHECK13-NEXT: [[TMP117:%.*]] = load i64, i64* [[N_CASTED48]], align 8 +// CHECK13-NEXT: [[TMP118:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_47]], align 4 +// CHECK13-NEXT: [[CONV51:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED50]] to i32* +// CHECK13-NEXT: store i32 [[TMP118]], i32* [[CONV51]], align 4 +// CHECK13-NEXT: [[TMP119:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED50]], align 8 +// CHECK13-NEXT: [[TMP120:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK13-NEXT: [[TMP121:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS52]], i32 0, i32 0 +// CHECK13-NEXT: [[TMP122:%.*]] = bitcast i8** [[TMP121]] to i64* +// CHECK13-NEXT: store i64 [[TMP117]], i64* [[TMP122]], align 8 +// CHECK13-NEXT: [[TMP123:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS53]], i32 0, i32 0 +// CHECK13-NEXT: [[TMP124:%.*]] = bitcast i8** [[TMP123]] to i64* +// CHECK13-NEXT: store i64 [[TMP117]], i64* [[TMP124]], align 8 +// CHECK13-NEXT: [[TMP125:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS54]], i64 0, i64 0 +// CHECK13-NEXT: store i8* null, i8** [[TMP125]], align 8 +// CHECK13-NEXT: [[TMP126:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS52]], i32 0, i32 1 +// CHECK13-NEXT: [[TMP127:%.*]] = bitcast i8** [[TMP126]] to i64* +// CHECK13-NEXT: store i64 [[TMP1]], i64* [[TMP127]], align 8 +// CHECK13-NEXT: [[TMP128:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS53]], i32 0, i32 1 +// CHECK13-NEXT: [[TMP129:%.*]] = bitcast i8** [[TMP128]] to i64* +// CHECK13-NEXT: store i64 [[TMP1]], i64* [[TMP129]], align 8 +// CHECK13-NEXT: [[TMP130:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS54]], i64 0, i64 1 +// CHECK13-NEXT: store i8* null, i8** [[TMP130]], align 8 +// CHECK13-NEXT: [[TMP131:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS52]], i32 0, i32 2 +// CHECK13-NEXT: [[TMP132:%.*]] = bitcast i8** [[TMP131]] to i32** +// CHECK13-NEXT: store i32* [[VLA]], i32** [[TMP132]], align 8 +// CHECK13-NEXT: [[TMP133:%.*]] = 
getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS53]], i32 0, i32 2 +// CHECK13-NEXT: [[TMP134:%.*]] = bitcast i8** [[TMP133]] to i32** +// CHECK13-NEXT: store i32* [[VLA]], i32** [[TMP134]], align 8 +// CHECK13-NEXT: store i64 [[TMP120]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.16, i32 0, i32 2), align 8 +// CHECK13-NEXT: [[TMP135:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS54]], i64 0, i64 2 +// CHECK13-NEXT: store i8* null, i8** [[TMP135]], align 8 +// CHECK13-NEXT: [[TMP136:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS52]], i32 0, i32 3 +// CHECK13-NEXT: [[TMP137:%.*]] = bitcast i8** [[TMP136]] to i64* +// CHECK13-NEXT: store i64 [[TMP119]], i64* [[TMP137]], align 8 +// CHECK13-NEXT: [[TMP138:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS53]], i32 0, i32 3 // CHECK13-NEXT: [[TMP139:%.*]] = bitcast i8** [[TMP138]] to i64* -// CHECK13-NEXT: store i64 [[TMP134]], i64* [[TMP139]], align 8 -// CHECK13-NEXT: [[TMP140:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS56]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP141:%.*]] = bitcast i8** [[TMP140]] to i64* -// CHECK13-NEXT: store i64 [[TMP134]], i64* [[TMP141]], align 8 -// CHECK13-NEXT: [[TMP142:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES58]], i32 0, i32 0 -// CHECK13-NEXT: store i64 4, i64* [[TMP142]], align 8 -// CHECK13-NEXT: [[TMP143:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS57]], i64 0, i64 0 -// CHECK13-NEXT: store i8* null, i8** [[TMP143]], align 8 -// CHECK13-NEXT: [[TMP144:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS55]], i32 0, i32 1 -// CHECK13-NEXT: [[TMP145:%.*]] = bitcast i8** [[TMP144]] to i64* -// CHECK13-NEXT: store i64 [[TMP1]], i64* [[TMP145]], align 8 -// CHECK13-NEXT: [[TMP146:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS56]], i32 0, i32 1 -// CHECK13-NEXT: [[TMP147:%.*]] = bitcast i8** [[TMP146]] to i64* -// CHECK13-NEXT: store i64 [[TMP1]], i64* [[TMP147]], align 8 -// CHECK13-NEXT: [[TMP148:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES58]], i32 0, i32 1 -// CHECK13-NEXT: store i64 8, i64* [[TMP148]], align 8 -// CHECK13-NEXT: [[TMP149:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS57]], i64 0, i64 1 -// CHECK13-NEXT: store i8* null, i8** [[TMP149]], align 8 -// CHECK13-NEXT: [[TMP150:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS55]], i32 0, i32 2 -// CHECK13-NEXT: [[TMP151:%.*]] = bitcast i8** [[TMP150]] to i32** -// CHECK13-NEXT: store i32* [[VLA]], i32** [[TMP151]], align 8 -// CHECK13-NEXT: [[TMP152:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS56]], i32 0, i32 2 -// CHECK13-NEXT: [[TMP153:%.*]] = bitcast i8** [[TMP152]] to i32** -// CHECK13-NEXT: store i32* [[VLA]], i32** [[TMP153]], align 8 -// CHECK13-NEXT: [[TMP154:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES58]], i32 0, i32 2 -// CHECK13-NEXT: store i64 [[TMP137]], i64* [[TMP154]], align 8 -// CHECK13-NEXT: [[TMP155:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS57]], i64 0, i64 2 -// CHECK13-NEXT: store i8* null, i8** [[TMP155]], align 8 -// CHECK13-NEXT: [[TMP156:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS55]], i32 0, i32 3 -// CHECK13-NEXT: [[TMP157:%.*]] = bitcast i8** [[TMP156]] to i64* -// CHECK13-NEXT: store i64 
[[TMP136]], i64* [[TMP157]], align 8 -// CHECK13-NEXT: [[TMP158:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS56]], i32 0, i32 3 -// CHECK13-NEXT: [[TMP159:%.*]] = bitcast i8** [[TMP158]] to i64* -// CHECK13-NEXT: store i64 [[TMP136]], i64* [[TMP159]], align 8 -// CHECK13-NEXT: [[TMP160:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES58]], i32 0, i32 3 -// CHECK13-NEXT: store i64 4, i64* [[TMP160]], align 8 -// CHECK13-NEXT: [[TMP161:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS57]], i64 0, i64 3 -// CHECK13-NEXT: store i8* null, i8** [[TMP161]], align 8 -// CHECK13-NEXT: [[TMP162:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS55]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP163:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS56]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP164:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES58]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP165:%.*]] = load i32, i32* [[N]], align 4 -// CHECK13-NEXT: store i32 [[TMP165]], i32* [[DOTCAPTURE_EXPR_60]], align 4 -// CHECK13-NEXT: [[TMP166:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_60]], align 4 -// CHECK13-NEXT: [[SUB62:%.*]] = sub nsw i32 [[TMP166]], 0 -// CHECK13-NEXT: [[DIV63:%.*]] = sdiv i32 [[SUB62]], 1 -// CHECK13-NEXT: [[SUB64:%.*]] = sub nsw i32 [[DIV63]], 1 -// CHECK13-NEXT: store i32 [[SUB64]], i32* [[DOTCAPTURE_EXPR_61]], align 4 -// CHECK13-NEXT: [[TMP167:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_61]], align 4 -// CHECK13-NEXT: [[ADD65:%.*]] = add nsw i32 [[TMP167]], 1 -// CHECK13-NEXT: [[TMP168:%.*]] = zext i32 [[ADD65]] to i64 -// CHECK13-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP168]]) -// CHECK13-NEXT: [[TMP169:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155.region_id, i32 4, i8** [[TMP162]], i8** [[TMP163]], i64* [[TMP164]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK13-NEXT: [[TMP170:%.*]] = icmp ne i32 [[TMP169]], 0 -// CHECK13-NEXT: br i1 [[TMP170]], label [[OMP_OFFLOAD_FAILED66:%.*]], label [[OMP_OFFLOAD_CONT67:%.*]] -// CHECK13: omp_offload.failed66: -// CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155(i64 [[TMP134]], i64 [[TMP1]], i32* [[VLA]], i64 [[TMP136]]) #[[ATTR3]] -// CHECK13-NEXT: br label [[OMP_OFFLOAD_CONT67]] -// CHECK13: omp_offload.cont67: -// CHECK13-NEXT: [[TMP171:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 -// CHECK13-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP171]]) +// CHECK13-NEXT: store i64 [[TMP119]], i64* [[TMP139]], align 8 +// CHECK13-NEXT: [[TMP140:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS54]], i64 0, i64 3 +// CHECK13-NEXT: store i8* null, i8** [[TMP140]], align 8 +// CHECK13-NEXT: [[TMP141:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS52]], i32 0, i32 0 +// CHECK13-NEXT: [[TMP142:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS53]], i32 0, i32 0 +// CHECK13-NEXT: [[TMP143:%.*]] = load i32, i32* [[N]], align 4 +// CHECK13-NEXT: store i32 [[TMP143]], i32* [[DOTCAPTURE_EXPR_56]], align 4 +// CHECK13-NEXT: [[TMP144:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_56]], align 4 +// CHECK13-NEXT: [[SUB58:%.*]] = sub nsw i32 [[TMP144]], 0 +// CHECK13-NEXT: 
[[DIV59:%.*]] = sdiv i32 [[SUB58]], 1 +// CHECK13-NEXT: [[SUB60:%.*]] = sub nsw i32 [[DIV59]], 1 +// CHECK13-NEXT: store i32 [[SUB60]], i32* [[DOTCAPTURE_EXPR_57]], align 4 +// CHECK13-NEXT: [[TMP145:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_57]], align 4 +// CHECK13-NEXT: [[ADD61:%.*]] = add nsw i32 [[TMP145]], 1 +// CHECK13-NEXT: [[TMP146:%.*]] = zext i32 [[ADD61]] to i64 +// CHECK13-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP146]]) +// CHECK13-NEXT: [[TMP147:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155.region_id, i32 4, i8** [[TMP141]], i8** [[TMP142]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.16, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK13-NEXT: [[TMP148:%.*]] = icmp ne i32 [[TMP147]], 0 +// CHECK13-NEXT: br i1 [[TMP148]], label [[OMP_OFFLOAD_FAILED62:%.*]], label [[OMP_OFFLOAD_CONT63:%.*]] +// CHECK13: omp_offload.failed62: +// CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155(i64 [[TMP117]], i64 [[TMP1]], i32* [[VLA]], i64 [[TMP119]]) #[[ATTR3]] +// CHECK13-NEXT: br label [[OMP_OFFLOAD_CONT63]] +// CHECK13: omp_offload.cont63: +// CHECK13-NEXT: [[TMP149:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 +// CHECK13-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP149]]) // CHECK13-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK13-NEXT: [[TMP172:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK13-NEXT: call void @llvm.stackrestore(i8* [[TMP172]]) -// CHECK13-NEXT: [[TMP173:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK13-NEXT: ret i32 [[TMP173]] +// CHECK13-NEXT: [[TMP150:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK13-NEXT: call void @llvm.stackrestore(i8* [[TMP150]]) +// CHECK13-NEXT: [[TMP151:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK13-NEXT: ret i32 [[TMP151]] // // // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139 @@ -9631,11 +9592,11 @@ // CHECK13-NEXT: [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK13-NEXT: store i32 [[TMP4]], i32* [[CONV3]], align 4 // CHECK13-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP5]]) +// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP5]]) // CHECK13-NEXT: ret void // // -// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..5 +// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..6 // CHECK13-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK13-NEXT: entry: // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -9722,7 +9683,7 @@ // CHECK13-NEXT: [[CONV9:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK13-NEXT: store i32 [[TMP22]], i32* [[CONV9]], align 4, !llvm.access.group !28 // CHECK13-NEXT: [[TMP23:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !28 -// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*, i64)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i64 [[TMP17]], i64 [[TMP19]], i64 [[TMP21]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP23]]), !llvm.access.group !28 +// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*, i64)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i64 [[TMP17]], i64 [[TMP19]], i64 [[TMP21]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP23]]), !llvm.access.group !28 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK13: omp.inner.for.inc: // CHECK13-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !28 @@ -9776,7 +9737,7 @@ // CHECK13-NEXT: ret void // // -// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..6 +// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..7 // CHECK13-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK13-NEXT: entry: // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -9913,11 +9874,11 @@ // CHECK13-NEXT: [[CONV1:%.*]] = bitcast i64* [[N_CASTED]] to i32* // CHECK13-NEXT: store i32 [[TMP2]], i32* [[CONV1]], align 4 // CHECK13-NEXT: [[TMP3:%.*]] = load i64, i64* [[N_CASTED]], align 8 -// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*)* @.omp_outlined..8 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]]) +// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]]) // CHECK13-NEXT: ret void // // -// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..8 +// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..10 // CHECK13-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] { // CHECK13-NEXT: entry: // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -9994,7 +9955,7 @@ // CHECK13-NEXT: [[CONV6:%.*]] = bitcast i64* [[N_CASTED]] to i32* // CHECK13-NEXT: store i32 [[TMP19]], i32* [[CONV6]], align 4, !llvm.access.group !34 // CHECK13-NEXT: [[TMP20:%.*]] = load i64, i64* [[N_CASTED]], align 8, !llvm.access.group !34 -// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), i64 [[TMP16]], i64 [[TMP18]], i64 [[TMP20]], i64 [[TMP0]], i32* [[TMP1]]), !llvm.access.group !34 +// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP16]], i64 [[TMP18]], i64 [[TMP20]], i64 [[TMP0]], i32* [[TMP1]]), !llvm.access.group !34 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK13: omp.inner.for.inc: // CHECK13-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !34 @@ -10025,7 +9986,7 @@ // CHECK13-NEXT: ret void // // -// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..9 +// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..11 // CHECK13-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] { // CHECK13-NEXT: entry: // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -10163,11 +10124,11 @@ // CHECK13-NEXT: [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK13-NEXT: store i32 [[TMP4]], i32* [[CONV3]], align 4 // CHECK13-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP5]]) +// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP5]]) // CHECK13-NEXT: ret void // // -// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..14 // CHECK13-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK13-NEXT: entry: // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -10252,7 +10213,7 @@ // CHECK13-NEXT: [[CONV9:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK13-NEXT: store i32 [[TMP21]], i32* [[CONV9]], align 4, !llvm.access.group !40 // CHECK13-NEXT: [[TMP22:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !40 -// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*, i64)* @.omp_outlined..12 to void (i32*, i32*, ...)*), i64 [[TMP16]], i64 [[TMP18]], i64 [[TMP20]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP22]]), !llvm.access.group !40 +// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*, i64)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i64 [[TMP16]], i64 [[TMP18]], i64 [[TMP20]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP22]]), !llvm.access.group !40 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK13: omp.inner.for.inc: // CHECK13-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !40 @@ -10283,7 +10244,7 @@ // CHECK13-NEXT: ret void // // -// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..12 +// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..15 // CHECK13-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK13-NEXT: entry: // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -10443,7 +10404,7 @@ // CHECK13-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK13-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK13-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK13-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l112.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK13-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l112.region_id, i32 1, 
i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.20, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.21, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK13-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK13-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK13: omp_offload.failed: @@ -10461,7 +10422,7 @@ // CHECK13-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0 // CHECK13-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0 // CHECK13-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK13-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.19, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.20, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK13-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.24, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.25, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK13-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 // CHECK13-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]] // CHECK13: omp_offload.failed5: @@ -10493,7 +10454,7 @@ // CHECK13-NEXT: [[TMP31:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0 // CHECK13-NEXT: [[TMP32:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0 // CHECK13-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK13-NEXT: [[TMP33:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l120.region_id, i32 2, i8** [[TMP31]], i8** [[TMP32]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.23, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.24, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK13-NEXT: [[TMP33:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l120.region_id, i32 2, i8** [[TMP31]], i8** [[TMP32]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.28, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.29, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK13-NEXT: [[TMP34:%.*]] = icmp ne i32 [[TMP33]], 0 // CHECK13-NEXT: br i1 [[TMP34]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]] // CHECK13: omp_offload.failed11: @@ -10511,7 +10472,7 @@ // CHECK13-NEXT: [[TMP40:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 0 // CHECK13-NEXT: [[TMP41:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 
0 // CHECK13-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK13-NEXT: [[TMP42:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l124.region_id, i32 1, i8** [[TMP40]], i8** [[TMP41]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.27, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.28, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK13-NEXT: [[TMP42:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l124.region_id, i32 1, i8** [[TMP40]], i8** [[TMP41]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.32, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.33, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK13-NEXT: [[TMP43:%.*]] = icmp ne i32 [[TMP42]], 0 // CHECK13-NEXT: br i1 [[TMP43]], label [[OMP_OFFLOAD_FAILED17:%.*]], label [[OMP_OFFLOAD_CONT18:%.*]] // CHECK13: omp_offload.failed17: @@ -10543,7 +10504,7 @@ // CHECK13-NEXT: [[TMP57:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 0 // CHECK13-NEXT: [[TMP58:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 0 // CHECK13-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK13-NEXT: [[TMP59:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l128.region_id, i32 2, i8** [[TMP57]], i8** [[TMP58]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.31, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.32, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK13-NEXT: [[TMP59:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l128.region_id, i32 2, i8** [[TMP57]], i8** [[TMP58]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.36, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.37, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK13-NEXT: [[TMP60:%.*]] = icmp ne i32 [[TMP59]], 0 // CHECK13-NEXT: br i1 [[TMP60]], label [[OMP_OFFLOAD_FAILED26:%.*]], label [[OMP_OFFLOAD_CONT27:%.*]] // CHECK13: omp_offload.failed26: @@ -10559,11 +10520,11 @@ // CHECK13-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK13-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 // CHECK13-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 -// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]])
// CHECK13-NEXT: ret void
//
//
-// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..14
+// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..18
// CHECK13-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
// CHECK13-NEXT: entry:
// CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
@@ -10611,7 +10572,7 @@
// CHECK13-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
// CHECK13-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !46
// CHECK13-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
-// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]), !llvm.access.group !46
+// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..19 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]), !llvm.access.group !46
// CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK13: omp.inner.for.inc:
// CHECK13-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !46
@@ -10633,7 +10594,7 @@
// CHECK13-NEXT: ret void
//
//
-// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..15
+// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..19
// CHECK13-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
// CHECK13-NEXT: entry:
// CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
@@ -10723,11 +10684,11 @@
// CHECK13-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK13-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
// CHECK13-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
-// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..17 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]])
+// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..22 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]])
// CHECK13-NEXT: ret void
//
//
-// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..17
+// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..22
// CHECK13-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
// CHECK13-NEXT: entry:
// CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
@@ -10775,7 +10736,7 @@
// CHECK13-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
// CHECK13-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !52
// CHECK13-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64
-// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]), !llvm.access.group !52
+// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..23 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]), !llvm.access.group !52
// CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK13: omp.inner.for.inc:
// CHECK13-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !52
@@ -10797,7 +10758,7 @@
// CHECK13-NEXT: ret void
//
//
-// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..18
+// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..23
// CHECK13-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] {
// CHECK13-NEXT: entry:
// CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
@@ -10895,11 +10856,11 @@
// CHECK13-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
// CHECK13-NEXT: store i32 [[TMP1]], i32* [[CONV1]], align 4
// CHECK13-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8
-// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..21 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP2]])
+// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..26 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP2]])
// CHECK13-NEXT: ret void
//
//
-// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..21
+// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..26
// CHECK13-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK13-NEXT: entry:
// CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
@@ -10955,7 +10916,7 @@
// CHECK13-NEXT: [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32*
// CHECK13-NEXT: store i32 [[TMP12]], i32* [[CONV2]], align 4, !llvm.access.group !58
// CHECK13-NEXT: [[TMP13:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !58
-// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..22 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]]), !llvm.access.group !58
+// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..27 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]]), !llvm.access.group !58
// CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK13: omp.inner.for.inc:
// CHECK13-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !58
@@ -10977,7 +10938,7 @@
// CHECK13-NEXT: ret void
//
//
-// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..22
+// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..27
// CHECK13-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] {
// CHECK13-NEXT: entry:
// CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
@@ -11092,11 +11053,11 @@
// CHECK13-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8
// CHECK13-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8
// CHECK13-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8
-// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..25 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]])
+// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...)
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..30 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK13-NEXT: ret void // // -// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..25 +// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..30 // CHECK13-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK13-NEXT: entry: // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -11144,7 +11105,7 @@ // CHECK13-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 // CHECK13-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !64 // CHECK13-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..26 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]), !llvm.access.group !64 +// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..31 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]), !llvm.access.group !64 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK13: omp.inner.for.inc: // CHECK13-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !64 @@ -11166,7 +11127,7 @@ // CHECK13-NEXT: ret void // // -// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..26 +// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..31 // CHECK13-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK13-NEXT: entry: // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -11262,11 +11223,11 @@ // CHECK13-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK13-NEXT: store i32 [[TMP1]], i32* [[CONV1]], align 4 // CHECK13-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..29 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP2]]) +// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..34 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP2]]) // CHECK13-NEXT: ret void // // -// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..29 +// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..34 // CHECK13-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK13-NEXT: entry: // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -11322,7 +11283,7 @@ // CHECK13-NEXT: [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK13-NEXT: store i32 [[TMP12]], i32* [[CONV2]], align 4, !llvm.access.group !70 // CHECK13-NEXT: [[TMP13:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !70 -// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..30 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]]), !llvm.access.group !70 +// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..35 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]]), !llvm.access.group !70 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK13: omp.inner.for.inc: // CHECK13-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !70 @@ -11344,7 +11305,7 @@ // CHECK13-NEXT: ret void // // -// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..30 +// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..35 // CHECK13-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK13-NEXT: entry: // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -11451,7 +11412,6 @@ // CHECK14-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK14-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK14-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8 -// CHECK14-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 8 // CHECK14-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK14-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK14-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -11459,38 +11419,34 @@ // CHECK14-NEXT: [[DOTOFFLOAD_BASEPTRS5:%.*]] = alloca [3 x i8*], align 8 // CHECK14-NEXT: [[DOTOFFLOAD_PTRS6:%.*]] = alloca [3 x i8*], align 8 // CHECK14-NEXT: [[DOTOFFLOAD_MAPPERS7:%.*]] = alloca [3 x i8*], align 8 -// CHECK14-NEXT: [[DOTOFFLOAD_SIZES8:%.*]] = alloca [3 x i64], align 8 -// CHECK14-NEXT: [[_TMP9:%.*]] = alloca i32, align 4 +// CHECK14-NEXT: [[_TMP8:%.*]] = alloca i32, align 4 +// CHECK14-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4 // CHECK14-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4 -// CHECK14-NEXT: [[DOTCAPTURE_EXPR_11:%.*]] = alloca i32, align 4 -// CHECK14-NEXT: 
[[DOTCAPTURE_EXPR_18:%.*]] = alloca i32, align 4 -// CHECK14-NEXT: [[N_CASTED19:%.*]] = alloca i64, align 8 +// CHECK14-NEXT: [[DOTCAPTURE_EXPR_17:%.*]] = alloca i32, align 4 +// CHECK14-NEXT: [[N_CASTED18:%.*]] = alloca i64, align 8 // CHECK14-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8 -// CHECK14-NEXT: [[DOTOFFLOAD_BASEPTRS22:%.*]] = alloca [4 x i8*], align 8 -// CHECK14-NEXT: [[DOTOFFLOAD_PTRS23:%.*]] = alloca [4 x i8*], align 8 -// CHECK14-NEXT: [[DOTOFFLOAD_MAPPERS24:%.*]] = alloca [4 x i8*], align 8 -// CHECK14-NEXT: [[DOTOFFLOAD_SIZES25:%.*]] = alloca [4 x i64], align 8 -// CHECK14-NEXT: [[_TMP26:%.*]] = alloca i32, align 4 -// CHECK14-NEXT: [[DOTCAPTURE_EXPR_27:%.*]] = alloca i32, align 4 -// CHECK14-NEXT: [[DOTCAPTURE_EXPR_28:%.*]] = alloca i32, align 4 -// CHECK14-NEXT: [[N_CASTED35:%.*]] = alloca i64, align 8 -// CHECK14-NEXT: [[DOTOFFLOAD_BASEPTRS37:%.*]] = alloca [3 x i8*], align 8 -// CHECK14-NEXT: [[DOTOFFLOAD_PTRS38:%.*]] = alloca [3 x i8*], align 8 -// CHECK14-NEXT: [[DOTOFFLOAD_MAPPERS39:%.*]] = alloca [3 x i8*], align 8 -// CHECK14-NEXT: [[DOTOFFLOAD_SIZES40:%.*]] = alloca [3 x i64], align 8 -// CHECK14-NEXT: [[_TMP41:%.*]] = alloca i32, align 4 -// CHECK14-NEXT: [[DOTCAPTURE_EXPR_42:%.*]] = alloca i32, align 4 -// CHECK14-NEXT: [[DOTCAPTURE_EXPR_43:%.*]] = alloca i32, align 4 -// CHECK14-NEXT: [[DOTCAPTURE_EXPR_50:%.*]] = alloca i32, align 4 -// CHECK14-NEXT: [[N_CASTED51:%.*]] = alloca i64, align 8 -// CHECK14-NEXT: [[DOTCAPTURE_EXPR__CASTED53:%.*]] = alloca i64, align 8 -// CHECK14-NEXT: [[DOTOFFLOAD_BASEPTRS55:%.*]] = alloca [4 x i8*], align 8 -// CHECK14-NEXT: [[DOTOFFLOAD_PTRS56:%.*]] = alloca [4 x i8*], align 8 -// CHECK14-NEXT: [[DOTOFFLOAD_MAPPERS57:%.*]] = alloca [4 x i8*], align 8 -// CHECK14-NEXT: [[DOTOFFLOAD_SIZES58:%.*]] = alloca [4 x i64], align 8 -// CHECK14-NEXT: [[_TMP59:%.*]] = alloca i32, align 4 -// CHECK14-NEXT: [[DOTCAPTURE_EXPR_60:%.*]] = alloca i32, align 4 -// CHECK14-NEXT: [[DOTCAPTURE_EXPR_61:%.*]] = alloca i32, align 4 +// CHECK14-NEXT: [[DOTOFFLOAD_BASEPTRS21:%.*]] = alloca [4 x i8*], align 8 +// CHECK14-NEXT: [[DOTOFFLOAD_PTRS22:%.*]] = alloca [4 x i8*], align 8 +// CHECK14-NEXT: [[DOTOFFLOAD_MAPPERS23:%.*]] = alloca [4 x i8*], align 8 +// CHECK14-NEXT: [[_TMP24:%.*]] = alloca i32, align 4 +// CHECK14-NEXT: [[DOTCAPTURE_EXPR_25:%.*]] = alloca i32, align 4 +// CHECK14-NEXT: [[DOTCAPTURE_EXPR_26:%.*]] = alloca i32, align 4 +// CHECK14-NEXT: [[N_CASTED33:%.*]] = alloca i64, align 8 +// CHECK14-NEXT: [[DOTOFFLOAD_BASEPTRS35:%.*]] = alloca [3 x i8*], align 8 +// CHECK14-NEXT: [[DOTOFFLOAD_PTRS36:%.*]] = alloca [3 x i8*], align 8 +// CHECK14-NEXT: [[DOTOFFLOAD_MAPPERS37:%.*]] = alloca [3 x i8*], align 8 +// CHECK14-NEXT: [[_TMP38:%.*]] = alloca i32, align 4 +// CHECK14-NEXT: [[DOTCAPTURE_EXPR_39:%.*]] = alloca i32, align 4 +// CHECK14-NEXT: [[DOTCAPTURE_EXPR_40:%.*]] = alloca i32, align 4 +// CHECK14-NEXT: [[DOTCAPTURE_EXPR_47:%.*]] = alloca i32, align 4 +// CHECK14-NEXT: [[N_CASTED48:%.*]] = alloca i64, align 8 +// CHECK14-NEXT: [[DOTCAPTURE_EXPR__CASTED50:%.*]] = alloca i64, align 8 +// CHECK14-NEXT: [[DOTOFFLOAD_BASEPTRS52:%.*]] = alloca [4 x i8*], align 8 +// CHECK14-NEXT: [[DOTOFFLOAD_PTRS53:%.*]] = alloca [4 x i8*], align 8 +// CHECK14-NEXT: [[DOTOFFLOAD_MAPPERS54:%.*]] = alloca [4 x i8*], align 8 +// CHECK14-NEXT: [[_TMP55:%.*]] = alloca i32, align 4 +// CHECK14-NEXT: [[DOTCAPTURE_EXPR_56:%.*]] = alloca i32, align 4 +// CHECK14-NEXT: [[DOTCAPTURE_EXPR_57:%.*]] = alloca i32, align 4 // CHECK14-NEXT: store i32 0, i32* 
[[RETVAL]], align 4 // CHECK14-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 // CHECK14-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 8 @@ -11513,314 +11469,280 @@ // CHECK14-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK14-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i64* // CHECK14-NEXT: store i64 [[TMP4]], i64* [[TMP9]], align 8 -// CHECK14-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK14-NEXT: store i64 4, i64* [[TMP10]], align 8 -// CHECK14-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK14-NEXT: store i8* null, i8** [[TMP11]], align 8 -// CHECK14-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK14-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64* -// CHECK14-NEXT: store i64 [[TMP1]], i64* [[TMP13]], align 8 -// CHECK14-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK14-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64* -// CHECK14-NEXT: store i64 [[TMP1]], i64* [[TMP15]], align 8 -// CHECK14-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK14-NEXT: store i64 8, i64* [[TMP16]], align 8 -// CHECK14-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK14-NEXT: store i8* null, i8** [[TMP17]], align 8 -// CHECK14-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK14-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK14-NEXT: store i8* null, i8** [[TMP10]], align 8 +// CHECK14-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK14-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i64* +// CHECK14-NEXT: store i64 [[TMP1]], i64* [[TMP12]], align 8 +// CHECK14-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK14-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i64* +// CHECK14-NEXT: store i64 [[TMP1]], i64* [[TMP14]], align 8 +// CHECK14-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK14-NEXT: store i8* null, i8** [[TMP15]], align 8 +// CHECK14-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK14-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** +// CHECK14-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 8 +// CHECK14-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK14-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** // CHECK14-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 8 -// CHECK14-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK14-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** -// CHECK14-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 8 -// CHECK14-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK14-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 8 -// CHECK14-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 
-// CHECK14-NEXT: store i8* null, i8** [[TMP23]], align 8 -// CHECK14-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK14-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK14-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK14-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4 -// CHECK14-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK14-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK14-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 +// CHECK14-NEXT: store i64 [[TMP5]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 2), align 8 +// CHECK14-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK14-NEXT: store i8* null, i8** [[TMP20]], align 8 +// CHECK14-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK14-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK14-NEXT: [[TMP23:%.*]] = load i32, i32* [[N]], align 4 +// CHECK14-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK14-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK14-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK14-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK14-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK14-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK14-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK14-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1 -// CHECK14-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 -// CHECK14-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP30]]) -// CHECK14-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK14-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 -// CHECK14-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK14-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK14-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], 1 +// CHECK14-NEXT: [[TMP26:%.*]] = zext i32 [[ADD]] to i64 +// CHECK14-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP26]]) +// CHECK14-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK14-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK14-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK14: omp_offload.failed: // CHECK14-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139(i64 [[TMP4]], i64 [[TMP1]], i32* [[VLA]]) 
#[[ATTR3:[0-9]+]] // CHECK14-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK14: omp_offload.cont: -// CHECK14-NEXT: [[TMP33:%.*]] = load i32, i32* [[N]], align 4 +// CHECK14-NEXT: [[TMP29:%.*]] = load i32, i32* [[N]], align 4 // CHECK14-NEXT: [[CONV4:%.*]] = bitcast i64* [[N_CASTED3]] to i32* -// CHECK14-NEXT: store i32 [[TMP33]], i32* [[CONV4]], align 4 -// CHECK14-NEXT: [[TMP34:%.*]] = load i64, i64* [[N_CASTED3]], align 8 -// CHECK14-NEXT: [[TMP35:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK14-NEXT: [[TMP36:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 -// CHECK14-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i64* -// CHECK14-NEXT: store i64 [[TMP34]], i64* [[TMP37]], align 8 -// CHECK14-NEXT: [[TMP38:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 -// CHECK14-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i64* -// CHECK14-NEXT: store i64 [[TMP34]], i64* [[TMP39]], align 8 -// CHECK14-NEXT: [[TMP40:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 0 -// CHECK14-NEXT: store i64 4, i64* [[TMP40]], align 8 -// CHECK14-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 0 +// CHECK14-NEXT: store i32 [[TMP29]], i32* [[CONV4]], align 4 +// CHECK14-NEXT: [[TMP30:%.*]] = load i64, i64* [[N_CASTED3]], align 8 +// CHECK14-NEXT: [[TMP31:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK14-NEXT: [[TMP32:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 +// CHECK14-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i64* +// CHECK14-NEXT: store i64 [[TMP30]], i64* [[TMP33]], align 8 +// CHECK14-NEXT: [[TMP34:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 +// CHECK14-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i64* +// CHECK14-NEXT: store i64 [[TMP30]], i64* [[TMP35]], align 8 +// CHECK14-NEXT: [[TMP36:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 0 +// CHECK14-NEXT: store i8* null, i8** [[TMP36]], align 8 +// CHECK14-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1 +// CHECK14-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i64* +// CHECK14-NEXT: store i64 [[TMP1]], i64* [[TMP38]], align 8 +// CHECK14-NEXT: [[TMP39:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 1 +// CHECK14-NEXT: [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i64* +// CHECK14-NEXT: store i64 [[TMP1]], i64* [[TMP40]], align 8 +// CHECK14-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 1 // CHECK14-NEXT: store i8* null, i8** [[TMP41]], align 8 -// CHECK14-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1 -// CHECK14-NEXT: [[TMP43:%.*]] = bitcast i8** [[TMP42]] to i64* -// CHECK14-NEXT: store i64 [[TMP1]], i64* [[TMP43]], align 8 -// CHECK14-NEXT: [[TMP44:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 1 -// CHECK14-NEXT: [[TMP45:%.*]] = bitcast i8** [[TMP44]] to i64* -// CHECK14-NEXT: store i64 [[TMP1]], i64* [[TMP45]], align 8 -// CHECK14-NEXT: [[TMP46:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 1 -// CHECK14-NEXT: store i64 8, i64* [[TMP46]], align 8 -// CHECK14-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 1 -// 
CHECK14-NEXT: store i8* null, i8** [[TMP47]], align 8 -// CHECK14-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 2 -// CHECK14-NEXT: [[TMP49:%.*]] = bitcast i8** [[TMP48]] to i32** -// CHECK14-NEXT: store i32* [[VLA]], i32** [[TMP49]], align 8 -// CHECK14-NEXT: [[TMP50:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 2 -// CHECK14-NEXT: [[TMP51:%.*]] = bitcast i8** [[TMP50]] to i32** -// CHECK14-NEXT: store i32* [[VLA]], i32** [[TMP51]], align 8 -// CHECK14-NEXT: [[TMP52:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 2 -// CHECK14-NEXT: store i64 [[TMP35]], i64* [[TMP52]], align 8 -// CHECK14-NEXT: [[TMP53:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 2 -// CHECK14-NEXT: store i8* null, i8** [[TMP53]], align 8 -// CHECK14-NEXT: [[TMP54:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 -// CHECK14-NEXT: [[TMP55:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 -// CHECK14-NEXT: [[TMP56:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 0 -// CHECK14-NEXT: [[TMP57:%.*]] = load i32, i32* [[N]], align 4 -// CHECK14-NEXT: store i32 [[TMP57]], i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK14-NEXT: [[TMP58:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK14-NEXT: [[SUB12:%.*]] = sub nsw i32 [[TMP58]], 0 -// CHECK14-NEXT: [[DIV13:%.*]] = sdiv i32 [[SUB12]], 1 -// CHECK14-NEXT: [[SUB14:%.*]] = sub nsw i32 [[DIV13]], 1 -// CHECK14-NEXT: store i32 [[SUB14]], i32* [[DOTCAPTURE_EXPR_11]], align 4 -// CHECK14-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_11]], align 4 -// CHECK14-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP59]], 1 -// CHECK14-NEXT: [[TMP60:%.*]] = zext i32 [[ADD15]] to i64 -// CHECK14-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP60]]) -// CHECK14-NEXT: [[TMP61:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143.region_id, i32 3, i8** [[TMP54]], i8** [[TMP55]], i64* [[TMP56]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK14-NEXT: [[TMP62:%.*]] = icmp ne i32 [[TMP61]], 0 -// CHECK14-NEXT: br i1 [[TMP62]], label [[OMP_OFFLOAD_FAILED16:%.*]], label [[OMP_OFFLOAD_CONT17:%.*]] -// CHECK14: omp_offload.failed16: -// CHECK14-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143(i64 [[TMP34]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] -// CHECK14-NEXT: br label [[OMP_OFFLOAD_CONT17]] -// CHECK14: omp_offload.cont17: -// CHECK14-NEXT: [[TMP63:%.*]] = load i32, i32* [[M]], align 4 -// CHECK14-NEXT: store i32 [[TMP63]], i32* [[DOTCAPTURE_EXPR_18]], align 4 -// CHECK14-NEXT: [[TMP64:%.*]] = load i32, i32* [[N]], align 4 -// CHECK14-NEXT: [[CONV20:%.*]] = bitcast i64* [[N_CASTED19]] to i32* -// CHECK14-NEXT: store i32 [[TMP64]], i32* [[CONV20]], align 4 -// CHECK14-NEXT: [[TMP65:%.*]] = load i64, i64* [[N_CASTED19]], align 8 -// CHECK14-NEXT: [[TMP66:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_18]], align 4 -// CHECK14-NEXT: [[CONV21:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* -// CHECK14-NEXT: store i32 [[TMP66]], i32* [[CONV21]], align 4 -// CHECK14-NEXT: [[TMP67:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK14-NEXT: 
[[TMP68:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK14-NEXT: [[TMP69:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 0 -// CHECK14-NEXT: [[TMP70:%.*]] = bitcast i8** [[TMP69]] to i64* -// CHECK14-NEXT: store i64 [[TMP65]], i64* [[TMP70]], align 8 -// CHECK14-NEXT: [[TMP71:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 0 -// CHECK14-NEXT: [[TMP72:%.*]] = bitcast i8** [[TMP71]] to i64* -// CHECK14-NEXT: store i64 [[TMP65]], i64* [[TMP72]], align 8 -// CHECK14-NEXT: [[TMP73:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 0 -// CHECK14-NEXT: store i64 4, i64* [[TMP73]], align 8 -// CHECK14-NEXT: [[TMP74:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 0 -// CHECK14-NEXT: store i8* null, i8** [[TMP74]], align 8 -// CHECK14-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 1 -// CHECK14-NEXT: [[TMP76:%.*]] = bitcast i8** [[TMP75]] to i64* -// CHECK14-NEXT: store i64 [[TMP1]], i64* [[TMP76]], align 8 -// CHECK14-NEXT: [[TMP77:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 1 -// CHECK14-NEXT: [[TMP78:%.*]] = bitcast i8** [[TMP77]] to i64* -// CHECK14-NEXT: store i64 [[TMP1]], i64* [[TMP78]], align 8 -// CHECK14-NEXT: [[TMP79:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 1 -// CHECK14-NEXT: store i64 8, i64* [[TMP79]], align 8 -// CHECK14-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 1 +// CHECK14-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 2 +// CHECK14-NEXT: [[TMP43:%.*]] = bitcast i8** [[TMP42]] to i32** +// CHECK14-NEXT: store i32* [[VLA]], i32** [[TMP43]], align 8 +// CHECK14-NEXT: [[TMP44:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 2 +// CHECK14-NEXT: [[TMP45:%.*]] = bitcast i8** [[TMP44]] to i32** +// CHECK14-NEXT: store i32* [[VLA]], i32** [[TMP45]], align 8 +// CHECK14-NEXT: store i64 [[TMP31]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 2), align 8 +// CHECK14-NEXT: [[TMP46:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 2 +// CHECK14-NEXT: store i8* null, i8** [[TMP46]], align 8 +// CHECK14-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 +// CHECK14-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 +// CHECK14-NEXT: [[TMP49:%.*]] = load i32, i32* [[N]], align 4 +// CHECK14-NEXT: store i32 [[TMP49]], i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK14-NEXT: [[TMP50:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK14-NEXT: [[SUB11:%.*]] = sub nsw i32 [[TMP50]], 0 +// CHECK14-NEXT: [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1 +// CHECK14-NEXT: [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1 +// CHECK14-NEXT: store i32 [[SUB13]], i32* [[DOTCAPTURE_EXPR_10]], align 4 +// CHECK14-NEXT: [[TMP51:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 +// CHECK14-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP51]], 1 +// CHECK14-NEXT: [[TMP52:%.*]] = zext i32 [[ADD14]] to i64 +// CHECK14-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP52]]) +// CHECK14-NEXT: [[TMP53:%.*]] = call i32 
@__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143.region_id, i32 3, i8** [[TMP47]], i8** [[TMP48]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK14-NEXT: [[TMP54:%.*]] = icmp ne i32 [[TMP53]], 0 +// CHECK14-NEXT: br i1 [[TMP54]], label [[OMP_OFFLOAD_FAILED15:%.*]], label [[OMP_OFFLOAD_CONT16:%.*]] +// CHECK14: omp_offload.failed15: +// CHECK14-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143(i64 [[TMP30]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] +// CHECK14-NEXT: br label [[OMP_OFFLOAD_CONT16]] +// CHECK14: omp_offload.cont16: +// CHECK14-NEXT: [[TMP55:%.*]] = load i32, i32* [[M]], align 4 +// CHECK14-NEXT: store i32 [[TMP55]], i32* [[DOTCAPTURE_EXPR_17]], align 4 +// CHECK14-NEXT: [[TMP56:%.*]] = load i32, i32* [[N]], align 4 +// CHECK14-NEXT: [[CONV19:%.*]] = bitcast i64* [[N_CASTED18]] to i32* +// CHECK14-NEXT: store i32 [[TMP56]], i32* [[CONV19]], align 4 +// CHECK14-NEXT: [[TMP57:%.*]] = load i64, i64* [[N_CASTED18]], align 8 +// CHECK14-NEXT: [[TMP58:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_17]], align 4 +// CHECK14-NEXT: [[CONV20:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* +// CHECK14-NEXT: store i32 [[TMP58]], i32* [[CONV20]], align 4 +// CHECK14-NEXT: [[TMP59:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 +// CHECK14-NEXT: [[TMP60:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK14-NEXT: [[TMP61:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0 +// CHECK14-NEXT: [[TMP62:%.*]] = bitcast i8** [[TMP61]] to i64* +// CHECK14-NEXT: store i64 [[TMP57]], i64* [[TMP62]], align 8 +// CHECK14-NEXT: [[TMP63:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0 +// CHECK14-NEXT: [[TMP64:%.*]] = bitcast i8** [[TMP63]] to i64* +// CHECK14-NEXT: store i64 [[TMP57]], i64* [[TMP64]], align 8 +// CHECK14-NEXT: [[TMP65:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 0 +// CHECK14-NEXT: store i8* null, i8** [[TMP65]], align 8 +// CHECK14-NEXT: [[TMP66:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 1 +// CHECK14-NEXT: [[TMP67:%.*]] = bitcast i8** [[TMP66]] to i64* +// CHECK14-NEXT: store i64 [[TMP1]], i64* [[TMP67]], align 8 +// CHECK14-NEXT: [[TMP68:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 1 +// CHECK14-NEXT: [[TMP69:%.*]] = bitcast i8** [[TMP68]] to i64* +// CHECK14-NEXT: store i64 [[TMP1]], i64* [[TMP69]], align 8 +// CHECK14-NEXT: [[TMP70:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 1 +// CHECK14-NEXT: store i8* null, i8** [[TMP70]], align 8 +// CHECK14-NEXT: [[TMP71:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 2 +// CHECK14-NEXT: [[TMP72:%.*]] = bitcast i8** [[TMP71]] to i32** +// CHECK14-NEXT: store i32* [[VLA]], i32** [[TMP72]], align 8 +// CHECK14-NEXT: [[TMP73:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 2 +// CHECK14-NEXT: [[TMP74:%.*]] = bitcast i8** [[TMP73]] to i32** +// CHECK14-NEXT: store i32* [[VLA]], i32** [[TMP74]], align 8 +// CHECK14-NEXT: store i64 [[TMP60]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 2), align 8 +// CHECK14-NEXT: 
[[TMP75:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 2 +// CHECK14-NEXT: store i8* null, i8** [[TMP75]], align 8 +// CHECK14-NEXT: [[TMP76:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 3 +// CHECK14-NEXT: [[TMP77:%.*]] = bitcast i8** [[TMP76]] to i64* +// CHECK14-NEXT: store i64 [[TMP59]], i64* [[TMP77]], align 8 +// CHECK14-NEXT: [[TMP78:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 3 +// CHECK14-NEXT: [[TMP79:%.*]] = bitcast i8** [[TMP78]] to i64* +// CHECK14-NEXT: store i64 [[TMP59]], i64* [[TMP79]], align 8 +// CHECK14-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 3 // CHECK14-NEXT: store i8* null, i8** [[TMP80]], align 8 -// CHECK14-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 2 -// CHECK14-NEXT: [[TMP82:%.*]] = bitcast i8** [[TMP81]] to i32** -// CHECK14-NEXT: store i32* [[VLA]], i32** [[TMP82]], align 8 -// CHECK14-NEXT: [[TMP83:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 2 -// CHECK14-NEXT: [[TMP84:%.*]] = bitcast i8** [[TMP83]] to i32** -// CHECK14-NEXT: store i32* [[VLA]], i32** [[TMP84]], align 8 -// CHECK14-NEXT: [[TMP85:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 2 -// CHECK14-NEXT: store i64 [[TMP68]], i64* [[TMP85]], align 8 -// CHECK14-NEXT: [[TMP86:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 2 -// CHECK14-NEXT: store i8* null, i8** [[TMP86]], align 8 -// CHECK14-NEXT: [[TMP87:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 3 -// CHECK14-NEXT: [[TMP88:%.*]] = bitcast i8** [[TMP87]] to i64* -// CHECK14-NEXT: store i64 [[TMP67]], i64* [[TMP88]], align 8 -// CHECK14-NEXT: [[TMP89:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 3 -// CHECK14-NEXT: [[TMP90:%.*]] = bitcast i8** [[TMP89]] to i64* -// CHECK14-NEXT: store i64 [[TMP67]], i64* [[TMP90]], align 8 -// CHECK14-NEXT: [[TMP91:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 3 -// CHECK14-NEXT: store i64 4, i64* [[TMP91]], align 8 -// CHECK14-NEXT: [[TMP92:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 3 -// CHECK14-NEXT: store i8* null, i8** [[TMP92]], align 8 -// CHECK14-NEXT: [[TMP93:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 0 -// CHECK14-NEXT: [[TMP94:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 0 -// CHECK14-NEXT: [[TMP95:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 0 -// CHECK14-NEXT: [[TMP96:%.*]] = load i32, i32* [[N]], align 4 -// CHECK14-NEXT: store i32 [[TMP96]], i32* [[DOTCAPTURE_EXPR_27]], align 4 -// CHECK14-NEXT: [[TMP97:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_27]], align 4 -// CHECK14-NEXT: [[SUB29:%.*]] = sub nsw i32 [[TMP97]], 0 -// CHECK14-NEXT: [[DIV30:%.*]] = sdiv i32 [[SUB29]], 1 -// CHECK14-NEXT: [[SUB31:%.*]] = sub nsw i32 [[DIV30]], 1 -// CHECK14-NEXT: store i32 [[SUB31]], i32* [[DOTCAPTURE_EXPR_28]], align 4 -// CHECK14-NEXT: [[TMP98:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_28]], align 4 -// CHECK14-NEXT: [[ADD32:%.*]] = add nsw i32 [[TMP98]], 1 -// CHECK14-NEXT: [[TMP99:%.*]] = zext i32 [[ADD32]] to i64 -// CHECK14-NEXT: call 
void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP99]]) -// CHECK14-NEXT: [[TMP100:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147.region_id, i32 4, i8** [[TMP93]], i8** [[TMP94]], i64* [[TMP95]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.7, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK14-NEXT: [[TMP101:%.*]] = icmp ne i32 [[TMP100]], 0 -// CHECK14-NEXT: br i1 [[TMP101]], label [[OMP_OFFLOAD_FAILED33:%.*]], label [[OMP_OFFLOAD_CONT34:%.*]] -// CHECK14: omp_offload.failed33: -// CHECK14-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147(i64 [[TMP65]], i64 [[TMP1]], i32* [[VLA]], i64 [[TMP67]]) #[[ATTR3]] -// CHECK14-NEXT: br label [[OMP_OFFLOAD_CONT34]] -// CHECK14: omp_offload.cont34: -// CHECK14-NEXT: [[TMP102:%.*]] = load i32, i32* [[N]], align 4 -// CHECK14-NEXT: [[CONV36:%.*]] = bitcast i64* [[N_CASTED35]] to i32* -// CHECK14-NEXT: store i32 [[TMP102]], i32* [[CONV36]], align 4 -// CHECK14-NEXT: [[TMP103:%.*]] = load i64, i64* [[N_CASTED35]], align 8 -// CHECK14-NEXT: [[TMP104:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK14-NEXT: [[TMP105:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS37]], i32 0, i32 0 -// CHECK14-NEXT: [[TMP106:%.*]] = bitcast i8** [[TMP105]] to i64* -// CHECK14-NEXT: store i64 [[TMP103]], i64* [[TMP106]], align 8 -// CHECK14-NEXT: [[TMP107:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS38]], i32 0, i32 0 -// CHECK14-NEXT: [[TMP108:%.*]] = bitcast i8** [[TMP107]] to i64* -// CHECK14-NEXT: store i64 [[TMP103]], i64* [[TMP108]], align 8 -// CHECK14-NEXT: [[TMP109:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES40]], i32 0, i32 0 -// CHECK14-NEXT: store i64 4, i64* [[TMP109]], align 8 -// CHECK14-NEXT: [[TMP110:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS39]], i64 0, i64 0 -// CHECK14-NEXT: store i8* null, i8** [[TMP110]], align 8 -// CHECK14-NEXT: [[TMP111:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS37]], i32 0, i32 1 -// CHECK14-NEXT: [[TMP112:%.*]] = bitcast i8** [[TMP111]] to i64* -// CHECK14-NEXT: store i64 [[TMP1]], i64* [[TMP112]], align 8 -// CHECK14-NEXT: [[TMP113:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS38]], i32 0, i32 1 -// CHECK14-NEXT: [[TMP114:%.*]] = bitcast i8** [[TMP113]] to i64* -// CHECK14-NEXT: store i64 [[TMP1]], i64* [[TMP114]], align 8 -// CHECK14-NEXT: [[TMP115:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES40]], i32 0, i32 1 -// CHECK14-NEXT: store i64 8, i64* [[TMP115]], align 8 -// CHECK14-NEXT: [[TMP116:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS39]], i64 0, i64 1 -// CHECK14-NEXT: store i8* null, i8** [[TMP116]], align 8 -// CHECK14-NEXT: [[TMP117:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS37]], i32 0, i32 2 -// CHECK14-NEXT: [[TMP118:%.*]] = bitcast i8** [[TMP117]] to i32** -// CHECK14-NEXT: store i32* [[VLA]], i32** [[TMP118]], align 8 -// CHECK14-NEXT: [[TMP119:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS38]], i32 0, i32 2 -// CHECK14-NEXT: [[TMP120:%.*]] = bitcast i8** [[TMP119]] to i32** -// CHECK14-NEXT: store i32* [[VLA]], i32** [[TMP120]], align 8 -// CHECK14-NEXT: [[TMP121:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES40]], i32 0, i32 2 -// CHECK14-NEXT: store 
i64 [[TMP104]], i64* [[TMP121]], align 8 -// CHECK14-NEXT: [[TMP122:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS39]], i64 0, i64 2 -// CHECK14-NEXT: store i8* null, i8** [[TMP122]], align 8 -// CHECK14-NEXT: [[TMP123:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS37]], i32 0, i32 0 -// CHECK14-NEXT: [[TMP124:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS38]], i32 0, i32 0 -// CHECK14-NEXT: [[TMP125:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES40]], i32 0, i32 0 -// CHECK14-NEXT: [[TMP126:%.*]] = load i32, i32* [[N]], align 4 -// CHECK14-NEXT: store i32 [[TMP126]], i32* [[DOTCAPTURE_EXPR_42]], align 4 -// CHECK14-NEXT: [[TMP127:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_42]], align 4 -// CHECK14-NEXT: [[SUB44:%.*]] = sub nsw i32 [[TMP127]], 0 -// CHECK14-NEXT: [[DIV45:%.*]] = sdiv i32 [[SUB44]], 1 -// CHECK14-NEXT: [[SUB46:%.*]] = sub nsw i32 [[DIV45]], 1 -// CHECK14-NEXT: store i32 [[SUB46]], i32* [[DOTCAPTURE_EXPR_43]], align 4 -// CHECK14-NEXT: [[TMP128:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_43]], align 4 -// CHECK14-NEXT: [[ADD47:%.*]] = add nsw i32 [[TMP128]], 1 -// CHECK14-NEXT: [[TMP129:%.*]] = zext i32 [[ADD47]] to i64 -// CHECK14-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP129]]) -// CHECK14-NEXT: [[TMP130:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151.region_id, i32 3, i8** [[TMP123]], i8** [[TMP124]], i64* [[TMP125]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK14-NEXT: [[TMP131:%.*]] = icmp ne i32 [[TMP130]], 0 -// CHECK14-NEXT: br i1 [[TMP131]], label [[OMP_OFFLOAD_FAILED48:%.*]], label [[OMP_OFFLOAD_CONT49:%.*]] -// CHECK14: omp_offload.failed48: -// CHECK14-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151(i64 [[TMP103]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] -// CHECK14-NEXT: br label [[OMP_OFFLOAD_CONT49]] -// CHECK14: omp_offload.cont49: -// CHECK14-NEXT: [[TMP132:%.*]] = load i32, i32* [[M]], align 4 -// CHECK14-NEXT: store i32 [[TMP132]], i32* [[DOTCAPTURE_EXPR_50]], align 4 -// CHECK14-NEXT: [[TMP133:%.*]] = load i32, i32* [[N]], align 4 -// CHECK14-NEXT: [[CONV52:%.*]] = bitcast i64* [[N_CASTED51]] to i32* -// CHECK14-NEXT: store i32 [[TMP133]], i32* [[CONV52]], align 4 -// CHECK14-NEXT: [[TMP134:%.*]] = load i64, i64* [[N_CASTED51]], align 8 -// CHECK14-NEXT: [[TMP135:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_50]], align 4 -// CHECK14-NEXT: [[CONV54:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED53]] to i32* -// CHECK14-NEXT: store i32 [[TMP135]], i32* [[CONV54]], align 4 -// CHECK14-NEXT: [[TMP136:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED53]], align 8 -// CHECK14-NEXT: [[TMP137:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK14-NEXT: [[TMP138:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS55]], i32 0, i32 0 +// CHECK14-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0 +// CHECK14-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0 +// CHECK14-NEXT: [[TMP83:%.*]] = load i32, i32* [[N]], align 4 +// CHECK14-NEXT: store i32 [[TMP83]], i32* [[DOTCAPTURE_EXPR_25]], align 4 +// CHECK14-NEXT: [[TMP84:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_25]], align 4 +// CHECK14-NEXT: [[SUB27:%.*]] = 
sub nsw i32 [[TMP84]], 0 +// CHECK14-NEXT: [[DIV28:%.*]] = sdiv i32 [[SUB27]], 1 +// CHECK14-NEXT: [[SUB29:%.*]] = sub nsw i32 [[DIV28]], 1 +// CHECK14-NEXT: store i32 [[SUB29]], i32* [[DOTCAPTURE_EXPR_26]], align 4 +// CHECK14-NEXT: [[TMP85:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_26]], align 4 +// CHECK14-NEXT: [[ADD30:%.*]] = add nsw i32 [[TMP85]], 1 +// CHECK14-NEXT: [[TMP86:%.*]] = zext i32 [[ADD30]] to i64 +// CHECK14-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP86]]) +// CHECK14-NEXT: [[TMP87:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147.region_id, i32 4, i8** [[TMP81]], i8** [[TMP82]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK14-NEXT: [[TMP88:%.*]] = icmp ne i32 [[TMP87]], 0 +// CHECK14-NEXT: br i1 [[TMP88]], label [[OMP_OFFLOAD_FAILED31:%.*]], label [[OMP_OFFLOAD_CONT32:%.*]] +// CHECK14: omp_offload.failed31: +// CHECK14-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147(i64 [[TMP57]], i64 [[TMP1]], i32* [[VLA]], i64 [[TMP59]]) #[[ATTR3]] +// CHECK14-NEXT: br label [[OMP_OFFLOAD_CONT32]] +// CHECK14: omp_offload.cont32: +// CHECK14-NEXT: [[TMP89:%.*]] = load i32, i32* [[N]], align 4 +// CHECK14-NEXT: [[CONV34:%.*]] = bitcast i64* [[N_CASTED33]] to i32* +// CHECK14-NEXT: store i32 [[TMP89]], i32* [[CONV34]], align 4 +// CHECK14-NEXT: [[TMP90:%.*]] = load i64, i64* [[N_CASTED33]], align 8 +// CHECK14-NEXT: [[TMP91:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK14-NEXT: [[TMP92:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS35]], i32 0, i32 0 +// CHECK14-NEXT: [[TMP93:%.*]] = bitcast i8** [[TMP92]] to i64* +// CHECK14-NEXT: store i64 [[TMP90]], i64* [[TMP93]], align 8 +// CHECK14-NEXT: [[TMP94:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS36]], i32 0, i32 0 +// CHECK14-NEXT: [[TMP95:%.*]] = bitcast i8** [[TMP94]] to i64* +// CHECK14-NEXT: store i64 [[TMP90]], i64* [[TMP95]], align 8 +// CHECK14-NEXT: [[TMP96:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS37]], i64 0, i64 0 +// CHECK14-NEXT: store i8* null, i8** [[TMP96]], align 8 +// CHECK14-NEXT: [[TMP97:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS35]], i32 0, i32 1 +// CHECK14-NEXT: [[TMP98:%.*]] = bitcast i8** [[TMP97]] to i64* +// CHECK14-NEXT: store i64 [[TMP1]], i64* [[TMP98]], align 8 +// CHECK14-NEXT: [[TMP99:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS36]], i32 0, i32 1 +// CHECK14-NEXT: [[TMP100:%.*]] = bitcast i8** [[TMP99]] to i64* +// CHECK14-NEXT: store i64 [[TMP1]], i64* [[TMP100]], align 8 +// CHECK14-NEXT: [[TMP101:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS37]], i64 0, i64 1 +// CHECK14-NEXT: store i8* null, i8** [[TMP101]], align 8 +// CHECK14-NEXT: [[TMP102:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS35]], i32 0, i32 2 +// CHECK14-NEXT: [[TMP103:%.*]] = bitcast i8** [[TMP102]] to i32** +// CHECK14-NEXT: store i32* [[VLA]], i32** [[TMP103]], align 8 +// CHECK14-NEXT: [[TMP104:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS36]], i32 0, i32 2 +// CHECK14-NEXT: [[TMP105:%.*]] = bitcast i8** [[TMP104]] to i32** +// CHECK14-NEXT: store i32* [[VLA]], i32** [[TMP105]], align 8 +// 
CHECK14-NEXT: store i64 [[TMP91]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.12, i32 0, i32 2), align 8 +// CHECK14-NEXT: [[TMP106:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS37]], i64 0, i64 2 +// CHECK14-NEXT: store i8* null, i8** [[TMP106]], align 8 +// CHECK14-NEXT: [[TMP107:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS35]], i32 0, i32 0 +// CHECK14-NEXT: [[TMP108:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS36]], i32 0, i32 0 +// CHECK14-NEXT: [[TMP109:%.*]] = load i32, i32* [[N]], align 4 +// CHECK14-NEXT: store i32 [[TMP109]], i32* [[DOTCAPTURE_EXPR_39]], align 4 +// CHECK14-NEXT: [[TMP110:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_39]], align 4 +// CHECK14-NEXT: [[SUB41:%.*]] = sub nsw i32 [[TMP110]], 0 +// CHECK14-NEXT: [[DIV42:%.*]] = sdiv i32 [[SUB41]], 1 +// CHECK14-NEXT: [[SUB43:%.*]] = sub nsw i32 [[DIV42]], 1 +// CHECK14-NEXT: store i32 [[SUB43]], i32* [[DOTCAPTURE_EXPR_40]], align 4 +// CHECK14-NEXT: [[TMP111:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_40]], align 4 +// CHECK14-NEXT: [[ADD44:%.*]] = add nsw i32 [[TMP111]], 1 +// CHECK14-NEXT: [[TMP112:%.*]] = zext i32 [[ADD44]] to i64 +// CHECK14-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP112]]) +// CHECK14-NEXT: [[TMP113:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151.region_id, i32 3, i8** [[TMP107]], i8** [[TMP108]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK14-NEXT: [[TMP114:%.*]] = icmp ne i32 [[TMP113]], 0 +// CHECK14-NEXT: br i1 [[TMP114]], label [[OMP_OFFLOAD_FAILED45:%.*]], label [[OMP_OFFLOAD_CONT46:%.*]] +// CHECK14: omp_offload.failed45: +// CHECK14-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151(i64 [[TMP90]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] +// CHECK14-NEXT: br label [[OMP_OFFLOAD_CONT46]] +// CHECK14: omp_offload.cont46: +// CHECK14-NEXT: [[TMP115:%.*]] = load i32, i32* [[M]], align 4 +// CHECK14-NEXT: store i32 [[TMP115]], i32* [[DOTCAPTURE_EXPR_47]], align 4 +// CHECK14-NEXT: [[TMP116:%.*]] = load i32, i32* [[N]], align 4 +// CHECK14-NEXT: [[CONV49:%.*]] = bitcast i64* [[N_CASTED48]] to i32* +// CHECK14-NEXT: store i32 [[TMP116]], i32* [[CONV49]], align 4 +// CHECK14-NEXT: [[TMP117:%.*]] = load i64, i64* [[N_CASTED48]], align 8 +// CHECK14-NEXT: [[TMP118:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_47]], align 4 +// CHECK14-NEXT: [[CONV51:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED50]] to i32* +// CHECK14-NEXT: store i32 [[TMP118]], i32* [[CONV51]], align 4 +// CHECK14-NEXT: [[TMP119:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED50]], align 8 +// CHECK14-NEXT: [[TMP120:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK14-NEXT: [[TMP121:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS52]], i32 0, i32 0 +// CHECK14-NEXT: [[TMP122:%.*]] = bitcast i8** [[TMP121]] to i64* +// CHECK14-NEXT: store i64 [[TMP117]], i64* [[TMP122]], align 8 +// CHECK14-NEXT: [[TMP123:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS53]], i32 0, i32 0 +// CHECK14-NEXT: [[TMP124:%.*]] = bitcast i8** [[TMP123]] to i64* +// CHECK14-NEXT: store i64 [[TMP117]], i64* [[TMP124]], align 8 +// CHECK14-NEXT: [[TMP125:%.*]] = getelementptr inbounds [4 x i8*], [4 
x i8*]* [[DOTOFFLOAD_MAPPERS54]], i64 0, i64 0 +// CHECK14-NEXT: store i8* null, i8** [[TMP125]], align 8 +// CHECK14-NEXT: [[TMP126:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS52]], i32 0, i32 1 +// CHECK14-NEXT: [[TMP127:%.*]] = bitcast i8** [[TMP126]] to i64* +// CHECK14-NEXT: store i64 [[TMP1]], i64* [[TMP127]], align 8 +// CHECK14-NEXT: [[TMP128:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS53]], i32 0, i32 1 +// CHECK14-NEXT: [[TMP129:%.*]] = bitcast i8** [[TMP128]] to i64* +// CHECK14-NEXT: store i64 [[TMP1]], i64* [[TMP129]], align 8 +// CHECK14-NEXT: [[TMP130:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS54]], i64 0, i64 1 +// CHECK14-NEXT: store i8* null, i8** [[TMP130]], align 8 +// CHECK14-NEXT: [[TMP131:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS52]], i32 0, i32 2 +// CHECK14-NEXT: [[TMP132:%.*]] = bitcast i8** [[TMP131]] to i32** +// CHECK14-NEXT: store i32* [[VLA]], i32** [[TMP132]], align 8 +// CHECK14-NEXT: [[TMP133:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS53]], i32 0, i32 2 +// CHECK14-NEXT: [[TMP134:%.*]] = bitcast i8** [[TMP133]] to i32** +// CHECK14-NEXT: store i32* [[VLA]], i32** [[TMP134]], align 8 +// CHECK14-NEXT: store i64 [[TMP120]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.16, i32 0, i32 2), align 8 +// CHECK14-NEXT: [[TMP135:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS54]], i64 0, i64 2 +// CHECK14-NEXT: store i8* null, i8** [[TMP135]], align 8 +// CHECK14-NEXT: [[TMP136:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS52]], i32 0, i32 3 +// CHECK14-NEXT: [[TMP137:%.*]] = bitcast i8** [[TMP136]] to i64* +// CHECK14-NEXT: store i64 [[TMP119]], i64* [[TMP137]], align 8 +// CHECK14-NEXT: [[TMP138:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS53]], i32 0, i32 3 // CHECK14-NEXT: [[TMP139:%.*]] = bitcast i8** [[TMP138]] to i64* -// CHECK14-NEXT: store i64 [[TMP134]], i64* [[TMP139]], align 8 -// CHECK14-NEXT: [[TMP140:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS56]], i32 0, i32 0 -// CHECK14-NEXT: [[TMP141:%.*]] = bitcast i8** [[TMP140]] to i64* -// CHECK14-NEXT: store i64 [[TMP134]], i64* [[TMP141]], align 8 -// CHECK14-NEXT: [[TMP142:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES58]], i32 0, i32 0 -// CHECK14-NEXT: store i64 4, i64* [[TMP142]], align 8 -// CHECK14-NEXT: [[TMP143:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS57]], i64 0, i64 0 -// CHECK14-NEXT: store i8* null, i8** [[TMP143]], align 8 -// CHECK14-NEXT: [[TMP144:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS55]], i32 0, i32 1 -// CHECK14-NEXT: [[TMP145:%.*]] = bitcast i8** [[TMP144]] to i64* -// CHECK14-NEXT: store i64 [[TMP1]], i64* [[TMP145]], align 8 -// CHECK14-NEXT: [[TMP146:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS56]], i32 0, i32 1 -// CHECK14-NEXT: [[TMP147:%.*]] = bitcast i8** [[TMP146]] to i64* -// CHECK14-NEXT: store i64 [[TMP1]], i64* [[TMP147]], align 8 -// CHECK14-NEXT: [[TMP148:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES58]], i32 0, i32 1 -// CHECK14-NEXT: store i64 8, i64* [[TMP148]], align 8 -// CHECK14-NEXT: [[TMP149:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS57]], i64 0, i64 1 -// CHECK14-NEXT: store i8* null, i8** [[TMP149]], align 8 -// CHECK14-NEXT: 
[[TMP150:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS55]], i32 0, i32 2 -// CHECK14-NEXT: [[TMP151:%.*]] = bitcast i8** [[TMP150]] to i32** -// CHECK14-NEXT: store i32* [[VLA]], i32** [[TMP151]], align 8 -// CHECK14-NEXT: [[TMP152:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS56]], i32 0, i32 2 -// CHECK14-NEXT: [[TMP153:%.*]] = bitcast i8** [[TMP152]] to i32** -// CHECK14-NEXT: store i32* [[VLA]], i32** [[TMP153]], align 8 -// CHECK14-NEXT: [[TMP154:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES58]], i32 0, i32 2 -// CHECK14-NEXT: store i64 [[TMP137]], i64* [[TMP154]], align 8 -// CHECK14-NEXT: [[TMP155:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS57]], i64 0, i64 2 -// CHECK14-NEXT: store i8* null, i8** [[TMP155]], align 8 -// CHECK14-NEXT: [[TMP156:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS55]], i32 0, i32 3 -// CHECK14-NEXT: [[TMP157:%.*]] = bitcast i8** [[TMP156]] to i64* -// CHECK14-NEXT: store i64 [[TMP136]], i64* [[TMP157]], align 8 -// CHECK14-NEXT: [[TMP158:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS56]], i32 0, i32 3 -// CHECK14-NEXT: [[TMP159:%.*]] = bitcast i8** [[TMP158]] to i64* -// CHECK14-NEXT: store i64 [[TMP136]], i64* [[TMP159]], align 8 -// CHECK14-NEXT: [[TMP160:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES58]], i32 0, i32 3 -// CHECK14-NEXT: store i64 4, i64* [[TMP160]], align 8 -// CHECK14-NEXT: [[TMP161:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS57]], i64 0, i64 3 -// CHECK14-NEXT: store i8* null, i8** [[TMP161]], align 8 -// CHECK14-NEXT: [[TMP162:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS55]], i32 0, i32 0 -// CHECK14-NEXT: [[TMP163:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS56]], i32 0, i32 0 -// CHECK14-NEXT: [[TMP164:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES58]], i32 0, i32 0 -// CHECK14-NEXT: [[TMP165:%.*]] = load i32, i32* [[N]], align 4 -// CHECK14-NEXT: store i32 [[TMP165]], i32* [[DOTCAPTURE_EXPR_60]], align 4 -// CHECK14-NEXT: [[TMP166:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_60]], align 4 -// CHECK14-NEXT: [[SUB62:%.*]] = sub nsw i32 [[TMP166]], 0 -// CHECK14-NEXT: [[DIV63:%.*]] = sdiv i32 [[SUB62]], 1 -// CHECK14-NEXT: [[SUB64:%.*]] = sub nsw i32 [[DIV63]], 1 -// CHECK14-NEXT: store i32 [[SUB64]], i32* [[DOTCAPTURE_EXPR_61]], align 4 -// CHECK14-NEXT: [[TMP167:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_61]], align 4 -// CHECK14-NEXT: [[ADD65:%.*]] = add nsw i32 [[TMP167]], 1 -// CHECK14-NEXT: [[TMP168:%.*]] = zext i32 [[ADD65]] to i64 -// CHECK14-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP168]]) -// CHECK14-NEXT: [[TMP169:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155.region_id, i32 4, i8** [[TMP162]], i8** [[TMP163]], i64* [[TMP164]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK14-NEXT: [[TMP170:%.*]] = icmp ne i32 [[TMP169]], 0 -// CHECK14-NEXT: br i1 [[TMP170]], label [[OMP_OFFLOAD_FAILED66:%.*]], label [[OMP_OFFLOAD_CONT67:%.*]] -// CHECK14: omp_offload.failed66: -// CHECK14-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155(i64 [[TMP134]], i64 [[TMP1]], i32* [[VLA]], i64 [[TMP136]]) #[[ATTR3]] 
-// CHECK14-NEXT: br label [[OMP_OFFLOAD_CONT67]] -// CHECK14: omp_offload.cont67: -// CHECK14-NEXT: [[TMP171:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 -// CHECK14-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP171]]) +// CHECK14-NEXT: store i64 [[TMP119]], i64* [[TMP139]], align 8 +// CHECK14-NEXT: [[TMP140:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS54]], i64 0, i64 3 +// CHECK14-NEXT: store i8* null, i8** [[TMP140]], align 8 +// CHECK14-NEXT: [[TMP141:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS52]], i32 0, i32 0 +// CHECK14-NEXT: [[TMP142:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS53]], i32 0, i32 0 +// CHECK14-NEXT: [[TMP143:%.*]] = load i32, i32* [[N]], align 4 +// CHECK14-NEXT: store i32 [[TMP143]], i32* [[DOTCAPTURE_EXPR_56]], align 4 +// CHECK14-NEXT: [[TMP144:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_56]], align 4 +// CHECK14-NEXT: [[SUB58:%.*]] = sub nsw i32 [[TMP144]], 0 +// CHECK14-NEXT: [[DIV59:%.*]] = sdiv i32 [[SUB58]], 1 +// CHECK14-NEXT: [[SUB60:%.*]] = sub nsw i32 [[DIV59]], 1 +// CHECK14-NEXT: store i32 [[SUB60]], i32* [[DOTCAPTURE_EXPR_57]], align 4 +// CHECK14-NEXT: [[TMP145:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_57]], align 4 +// CHECK14-NEXT: [[ADD61:%.*]] = add nsw i32 [[TMP145]], 1 +// CHECK14-NEXT: [[TMP146:%.*]] = zext i32 [[ADD61]] to i64 +// CHECK14-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP146]]) +// CHECK14-NEXT: [[TMP147:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155.region_id, i32 4, i8** [[TMP141]], i8** [[TMP142]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.16, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK14-NEXT: [[TMP148:%.*]] = icmp ne i32 [[TMP147]], 0 +// CHECK14-NEXT: br i1 [[TMP148]], label [[OMP_OFFLOAD_FAILED62:%.*]], label [[OMP_OFFLOAD_CONT63:%.*]] +// CHECK14: omp_offload.failed62: +// CHECK14-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155(i64 [[TMP117]], i64 [[TMP1]], i32* [[VLA]], i64 [[TMP119]]) #[[ATTR3]] +// CHECK14-NEXT: br label [[OMP_OFFLOAD_CONT63]] +// CHECK14: omp_offload.cont63: +// CHECK14-NEXT: [[TMP149:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 +// CHECK14-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP149]]) // CHECK14-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK14-NEXT: [[TMP172:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK14-NEXT: call void @llvm.stackrestore(i8* [[TMP172]]) -// CHECK14-NEXT: [[TMP173:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK14-NEXT: ret i32 [[TMP173]] +// CHECK14-NEXT: [[TMP150:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK14-NEXT: call void @llvm.stackrestore(i8* [[TMP150]]) +// CHECK14-NEXT: [[TMP151:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK14-NEXT: ret i32 [[TMP151]] // // // CHECK14-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139 @@ -12340,11 +12262,11 @@ // CHECK14-NEXT: [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK14-NEXT: store i32 [[TMP4]], i32* [[CONV3]], align 4 // CHECK14-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, 
i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP5]]) +// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP5]]) // CHECK14-NEXT: ret void // // -// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..5 +// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..6 // CHECK14-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK14-NEXT: entry: // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -12431,7 +12353,7 @@ // CHECK14-NEXT: [[CONV9:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK14-NEXT: store i32 [[TMP22]], i32* [[CONV9]], align 4, !llvm.access.group !28 // CHECK14-NEXT: [[TMP23:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !28 -// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*, i64)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i64 [[TMP17]], i64 [[TMP19]], i64 [[TMP21]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP23]]), !llvm.access.group !28 +// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*, i64)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i64 [[TMP17]], i64 [[TMP19]], i64 [[TMP21]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP23]]), !llvm.access.group !28 // CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK14: omp.inner.for.inc: // CHECK14-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !28 @@ -12485,7 +12407,7 @@ // CHECK14-NEXT: ret void // // -// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..6 +// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..7 // CHECK14-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK14-NEXT: entry: // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -12622,11 +12544,11 @@ // CHECK14-NEXT: [[CONV1:%.*]] = bitcast i64* [[N_CASTED]] to i32* // CHECK14-NEXT: store i32 [[TMP2]], i32* [[CONV1]], align 4 // CHECK14-NEXT: [[TMP3:%.*]] = load i64, i64* [[N_CASTED]], align 8 -// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*)* @.omp_outlined..8 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]]) +// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]]) // CHECK14-NEXT: ret void // // -// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..8 +// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..10 // CHECK14-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] { // CHECK14-NEXT: entry: // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -12703,7 +12625,7 @@ // CHECK14-NEXT: [[CONV6:%.*]] = bitcast i64* [[N_CASTED]] to i32* // CHECK14-NEXT: store i32 [[TMP19]], i32* [[CONV6]], align 4, !llvm.access.group !34 // CHECK14-NEXT: [[TMP20:%.*]] = load i64, i64* [[N_CASTED]], align 8, !llvm.access.group !34 -// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), i64 [[TMP16]], i64 [[TMP18]], i64 [[TMP20]], i64 [[TMP0]], i32* [[TMP1]]), !llvm.access.group !34 +// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP16]], i64 [[TMP18]], i64 [[TMP20]], i64 [[TMP0]], i32* [[TMP1]]), !llvm.access.group !34 // CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK14: omp.inner.for.inc: // CHECK14-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !34 @@ -12734,7 +12656,7 @@ // CHECK14-NEXT: ret void // // -// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..9 +// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..11 // CHECK14-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] { // CHECK14-NEXT: entry: // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -12872,11 +12794,11 @@ // CHECK14-NEXT: [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK14-NEXT: store i32 [[TMP4]], i32* [[CONV3]], align 4 // CHECK14-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP5]]) +// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP5]]) // CHECK14-NEXT: ret void // // -// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..14 // CHECK14-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK14-NEXT: entry: // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -12961,7 +12883,7 @@ // CHECK14-NEXT: [[CONV9:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK14-NEXT: store i32 [[TMP21]], i32* [[CONV9]], align 4, !llvm.access.group !40 // CHECK14-NEXT: [[TMP22:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !40 -// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*, i64)* @.omp_outlined..12 to void (i32*, i32*, ...)*), i64 [[TMP16]], i64 [[TMP18]], i64 [[TMP20]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP22]]), !llvm.access.group !40 +// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*, i64)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i64 [[TMP16]], i64 [[TMP18]], i64 [[TMP20]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP22]]), !llvm.access.group !40 // CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK14: omp.inner.for.inc: // CHECK14-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !40 @@ -12992,7 +12914,7 @@ // CHECK14-NEXT: ret void // // -// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..12 +// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..15 // CHECK14-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK14-NEXT: entry: // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -13152,7 +13074,7 @@ // CHECK14-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK14-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK14-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK14-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l112.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK14-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l112.region_id, i32 1, 
i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.20, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.21, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK14-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK14-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK14: omp_offload.failed: @@ -13170,7 +13092,7 @@ // CHECK14-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0 // CHECK14-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0 // CHECK14-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK14-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.19, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.20, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK14-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.24, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.25, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK14-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 // CHECK14-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]] // CHECK14: omp_offload.failed5: @@ -13202,7 +13124,7 @@ // CHECK14-NEXT: [[TMP31:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0 // CHECK14-NEXT: [[TMP32:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0 // CHECK14-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK14-NEXT: [[TMP33:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l120.region_id, i32 2, i8** [[TMP31]], i8** [[TMP32]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.23, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.24, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK14-NEXT: [[TMP33:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l120.region_id, i32 2, i8** [[TMP31]], i8** [[TMP32]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.28, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.29, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK14-NEXT: [[TMP34:%.*]] = icmp ne i32 [[TMP33]], 0 // CHECK14-NEXT: br i1 [[TMP34]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]] // CHECK14: omp_offload.failed11: @@ -13220,7 +13142,7 @@ // CHECK14-NEXT: [[TMP40:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 0 // CHECK14-NEXT: [[TMP41:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 
0 // CHECK14-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK14-NEXT: [[TMP42:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l124.region_id, i32 1, i8** [[TMP40]], i8** [[TMP41]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.27, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.28, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK14-NEXT: [[TMP42:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l124.region_id, i32 1, i8** [[TMP40]], i8** [[TMP41]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.32, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.33, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK14-NEXT: [[TMP43:%.*]] = icmp ne i32 [[TMP42]], 0 // CHECK14-NEXT: br i1 [[TMP43]], label [[OMP_OFFLOAD_FAILED17:%.*]], label [[OMP_OFFLOAD_CONT18:%.*]] // CHECK14: omp_offload.failed17: @@ -13252,7 +13174,7 @@ // CHECK14-NEXT: [[TMP57:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 0 // CHECK14-NEXT: [[TMP58:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 0 // CHECK14-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK14-NEXT: [[TMP59:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l128.region_id, i32 2, i8** [[TMP57]], i8** [[TMP58]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.31, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.32, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK14-NEXT: [[TMP59:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l128.region_id, i32 2, i8** [[TMP57]], i8** [[TMP58]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.36, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.37, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK14-NEXT: [[TMP60:%.*]] = icmp ne i32 [[TMP59]], 0 // CHECK14-NEXT: br i1 [[TMP60]], label [[OMP_OFFLOAD_FAILED26:%.*]], label [[OMP_OFFLOAD_CONT27:%.*]] // CHECK14: omp_offload.failed26: @@ -13268,11 +13190,11 @@ // CHECK14-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK14-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 // CHECK14-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 -// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK14-NEXT: ret void // // -// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..14 +// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..18 // CHECK14-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK14-NEXT: entry: // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -13320,7 +13242,7 @@ // CHECK14-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 // CHECK14-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !46 // CHECK14-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]), !llvm.access.group !46 +// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..19 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]), !llvm.access.group !46 // CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK14: omp.inner.for.inc: // CHECK14-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !46 @@ -13342,7 +13264,7 @@ // CHECK14-NEXT: ret void // // -// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..15 +// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..19 // CHECK14-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK14-NEXT: entry: // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -13432,11 +13354,11 @@ // CHECK14-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK14-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 // CHECK14-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 -// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..17 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..22 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK14-NEXT: ret void // // -// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..17 +// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..22 // CHECK14-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK14-NEXT: entry: // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -13484,7 +13406,7 @@ // CHECK14-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 // CHECK14-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !52 // CHECK14-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]), !llvm.access.group !52 +// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..23 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]), !llvm.access.group !52 // CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK14: omp.inner.for.inc: // CHECK14-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !52 @@ -13506,7 +13428,7 @@ // CHECK14-NEXT: ret void // // -// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..18 +// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..23 // CHECK14-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK14-NEXT: entry: // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -13604,11 +13526,11 @@ // CHECK14-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK14-NEXT: store i32 [[TMP1]], i32* [[CONV1]], align 4 // CHECK14-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..21 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP2]]) +// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..26 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP2]]) // CHECK14-NEXT: ret void // // -// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..21 +// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..26 // CHECK14-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK14-NEXT: entry: // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -13664,7 +13586,7 @@ // CHECK14-NEXT: [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK14-NEXT: store i32 [[TMP12]], i32* [[CONV2]], align 4, !llvm.access.group !58 // CHECK14-NEXT: [[TMP13:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !58 -// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..22 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]]), !llvm.access.group !58 +// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..27 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]]), !llvm.access.group !58 // CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK14: omp.inner.for.inc: // CHECK14-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !58 @@ -13686,7 +13608,7 @@ // CHECK14-NEXT: ret void // // -// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..22 +// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..27 // CHECK14-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK14-NEXT: entry: // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -13801,11 +13723,11 @@ // CHECK14-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK14-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 // CHECK14-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 -// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..25 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..30 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK14-NEXT: ret void // // -// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..25 +// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..30 // CHECK14-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK14-NEXT: entry: // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -13853,7 +13775,7 @@ // CHECK14-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 // CHECK14-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !64 // CHECK14-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..26 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]), !llvm.access.group !64 +// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..31 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]), !llvm.access.group !64 // CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK14: omp.inner.for.inc: // CHECK14-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !64 @@ -13875,7 +13797,7 @@ // CHECK14-NEXT: ret void // // -// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..26 +// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..31 // CHECK14-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK14-NEXT: entry: // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -13971,11 +13893,11 @@ // CHECK14-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK14-NEXT: store i32 [[TMP1]], i32* [[CONV1]], align 4 // CHECK14-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..29 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP2]]) +// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..34 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP2]]) // CHECK14-NEXT: ret void // // -// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..29 +// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..34 // CHECK14-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK14-NEXT: entry: // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -14031,7 +13953,7 @@ // CHECK14-NEXT: [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK14-NEXT: store i32 [[TMP12]], i32* [[CONV2]], align 4, !llvm.access.group !70 // CHECK14-NEXT: [[TMP13:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !70 -// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..30 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]]), !llvm.access.group !70 +// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..35 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]]), !llvm.access.group !70 // CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK14: omp.inner.for.inc: // CHECK14-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !70 @@ -14053,7 +13975,7 @@ // CHECK14-NEXT: ret void // // -// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..30 +// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..35 // CHECK14-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK14-NEXT: entry: // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -14160,7 +14082,6 @@ // CHECK15-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK15-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK15-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4 -// CHECK15-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 4 // CHECK15-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK15-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK15-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -14168,38 +14089,34 @@ // CHECK15-NEXT: [[DOTOFFLOAD_BASEPTRS4:%.*]] = alloca [3 x i8*], align 4 // CHECK15-NEXT: [[DOTOFFLOAD_PTRS5:%.*]] = alloca [3 x i8*], align 4 // CHECK15-NEXT: [[DOTOFFLOAD_MAPPERS6:%.*]] = alloca [3 x i8*], align 4 -// CHECK15-NEXT: [[DOTOFFLOAD_SIZES7:%.*]] = alloca [3 x i64], align 4 -// CHECK15-NEXT: [[_TMP8:%.*]] = alloca i32, align 4 +// CHECK15-NEXT: [[_TMP7:%.*]] = alloca i32, align 4 +// CHECK15-NEXT: [[DOTCAPTURE_EXPR_8:%.*]] = alloca i32, align 4 // CHECK15-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4 -// CHECK15-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4 -// CHECK15-NEXT: 
[[DOTCAPTURE_EXPR_17:%.*]] = alloca i32, align 4 -// CHECK15-NEXT: [[N_CASTED18:%.*]] = alloca i32, align 4 +// CHECK15-NEXT: [[DOTCAPTURE_EXPR_16:%.*]] = alloca i32, align 4 +// CHECK15-NEXT: [[N_CASTED17:%.*]] = alloca i32, align 4 // CHECK15-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4 -// CHECK15-NEXT: [[DOTOFFLOAD_BASEPTRS19:%.*]] = alloca [4 x i8*], align 4 -// CHECK15-NEXT: [[DOTOFFLOAD_PTRS20:%.*]] = alloca [4 x i8*], align 4 -// CHECK15-NEXT: [[DOTOFFLOAD_MAPPERS21:%.*]] = alloca [4 x i8*], align 4 -// CHECK15-NEXT: [[DOTOFFLOAD_SIZES22:%.*]] = alloca [4 x i64], align 4 -// CHECK15-NEXT: [[_TMP23:%.*]] = alloca i32, align 4 -// CHECK15-NEXT: [[DOTCAPTURE_EXPR_24:%.*]] = alloca i32, align 4 -// CHECK15-NEXT: [[DOTCAPTURE_EXPR_25:%.*]] = alloca i32, align 4 -// CHECK15-NEXT: [[N_CASTED32:%.*]] = alloca i32, align 4 -// CHECK15-NEXT: [[DOTOFFLOAD_BASEPTRS33:%.*]] = alloca [3 x i8*], align 4 -// CHECK15-NEXT: [[DOTOFFLOAD_PTRS34:%.*]] = alloca [3 x i8*], align 4 -// CHECK15-NEXT: [[DOTOFFLOAD_MAPPERS35:%.*]] = alloca [3 x i8*], align 4 -// CHECK15-NEXT: [[DOTOFFLOAD_SIZES36:%.*]] = alloca [3 x i64], align 4 -// CHECK15-NEXT: [[_TMP37:%.*]] = alloca i32, align 4 -// CHECK15-NEXT: [[DOTCAPTURE_EXPR_38:%.*]] = alloca i32, align 4 -// CHECK15-NEXT: [[DOTCAPTURE_EXPR_39:%.*]] = alloca i32, align 4 -// CHECK15-NEXT: [[DOTCAPTURE_EXPR_46:%.*]] = alloca i32, align 4 -// CHECK15-NEXT: [[N_CASTED47:%.*]] = alloca i32, align 4 -// CHECK15-NEXT: [[DOTCAPTURE_EXPR__CASTED48:%.*]] = alloca i32, align 4 -// CHECK15-NEXT: [[DOTOFFLOAD_BASEPTRS49:%.*]] = alloca [4 x i8*], align 4 -// CHECK15-NEXT: [[DOTOFFLOAD_PTRS50:%.*]] = alloca [4 x i8*], align 4 -// CHECK15-NEXT: [[DOTOFFLOAD_MAPPERS51:%.*]] = alloca [4 x i8*], align 4 -// CHECK15-NEXT: [[DOTOFFLOAD_SIZES52:%.*]] = alloca [4 x i64], align 4 -// CHECK15-NEXT: [[_TMP53:%.*]] = alloca i32, align 4 -// CHECK15-NEXT: [[DOTCAPTURE_EXPR_54:%.*]] = alloca i32, align 4 -// CHECK15-NEXT: [[DOTCAPTURE_EXPR_55:%.*]] = alloca i32, align 4 +// CHECK15-NEXT: [[DOTOFFLOAD_BASEPTRS18:%.*]] = alloca [4 x i8*], align 4 +// CHECK15-NEXT: [[DOTOFFLOAD_PTRS19:%.*]] = alloca [4 x i8*], align 4 +// CHECK15-NEXT: [[DOTOFFLOAD_MAPPERS20:%.*]] = alloca [4 x i8*], align 4 +// CHECK15-NEXT: [[_TMP21:%.*]] = alloca i32, align 4 +// CHECK15-NEXT: [[DOTCAPTURE_EXPR_22:%.*]] = alloca i32, align 4 +// CHECK15-NEXT: [[DOTCAPTURE_EXPR_23:%.*]] = alloca i32, align 4 +// CHECK15-NEXT: [[N_CASTED30:%.*]] = alloca i32, align 4 +// CHECK15-NEXT: [[DOTOFFLOAD_BASEPTRS31:%.*]] = alloca [3 x i8*], align 4 +// CHECK15-NEXT: [[DOTOFFLOAD_PTRS32:%.*]] = alloca [3 x i8*], align 4 +// CHECK15-NEXT: [[DOTOFFLOAD_MAPPERS33:%.*]] = alloca [3 x i8*], align 4 +// CHECK15-NEXT: [[_TMP34:%.*]] = alloca i32, align 4 +// CHECK15-NEXT: [[DOTCAPTURE_EXPR_35:%.*]] = alloca i32, align 4 +// CHECK15-NEXT: [[DOTCAPTURE_EXPR_36:%.*]] = alloca i32, align 4 +// CHECK15-NEXT: [[DOTCAPTURE_EXPR_43:%.*]] = alloca i32, align 4 +// CHECK15-NEXT: [[N_CASTED44:%.*]] = alloca i32, align 4 +// CHECK15-NEXT: [[DOTCAPTURE_EXPR__CASTED45:%.*]] = alloca i32, align 4 +// CHECK15-NEXT: [[DOTOFFLOAD_BASEPTRS46:%.*]] = alloca [4 x i8*], align 4 +// CHECK15-NEXT: [[DOTOFFLOAD_PTRS47:%.*]] = alloca [4 x i8*], align 4 +// CHECK15-NEXT: [[DOTOFFLOAD_MAPPERS48:%.*]] = alloca [4 x i8*], align 4 +// CHECK15-NEXT: [[_TMP49:%.*]] = alloca i32, align 4 +// CHECK15-NEXT: [[DOTCAPTURE_EXPR_50:%.*]] = alloca i32, align 4 +// CHECK15-NEXT: [[DOTCAPTURE_EXPR_51:%.*]] = alloca i32, align 4 // CHECK15-NEXT: store i32 0, i32* 
[[RETVAL]], align 4 // CHECK15-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 // CHECK15-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 4 @@ -14221,312 +14138,278 @@ // CHECK15-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK15-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i32* // CHECK15-NEXT: store i32 [[TMP3]], i32* [[TMP9]], align 4 -// CHECK15-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK15-NEXT: store i64 4, i64* [[TMP10]], align 4 -// CHECK15-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK15-NEXT: store i8* null, i8** [[TMP11]], align 4 -// CHECK15-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK15-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32* -// CHECK15-NEXT: store i32 [[TMP0]], i32* [[TMP13]], align 4 -// CHECK15-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK15-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32* -// CHECK15-NEXT: store i32 [[TMP0]], i32* [[TMP15]], align 4 -// CHECK15-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK15-NEXT: store i64 4, i64* [[TMP16]], align 4 -// CHECK15-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK15-NEXT: store i8* null, i8** [[TMP17]], align 4 -// CHECK15-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK15-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK15-NEXT: store i8* null, i8** [[TMP10]], align 4 +// CHECK15-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK15-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i32* +// CHECK15-NEXT: store i32 [[TMP0]], i32* [[TMP12]], align 4 +// CHECK15-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK15-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i32* +// CHECK15-NEXT: store i32 [[TMP0]], i32* [[TMP14]], align 4 +// CHECK15-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK15-NEXT: store i8* null, i8** [[TMP15]], align 4 +// CHECK15-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK15-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** +// CHECK15-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 4 +// CHECK15-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK15-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** // CHECK15-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 4 -// CHECK15-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK15-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** -// CHECK15-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 4 -// CHECK15-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK15-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 4 -// CHECK15-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 
-// CHECK15-NEXT: store i8* null, i8** [[TMP23]], align 4 -// CHECK15-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4 -// CHECK15-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK15-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK15-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 +// CHECK15-NEXT: store i64 [[TMP5]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 2), align 4 +// CHECK15-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK15-NEXT: store i8* null, i8** [[TMP20]], align 4 +// CHECK15-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK15-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK15-NEXT: [[TMP23:%.*]] = load i32, i32* [[N]], align 4 +// CHECK15-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK15-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK15-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK15-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK15-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK15-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK15-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK15-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1 -// CHECK15-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 -// CHECK15-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP30]]) -// CHECK15-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK15-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 -// CHECK15-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK15-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK15-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], 1 +// CHECK15-NEXT: [[TMP26:%.*]] = zext i32 [[ADD]] to i64 +// CHECK15-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP26]]) +// CHECK15-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK15-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK15-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK15: omp_offload.failed: // CHECK15-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139(i32 [[TMP3]], i32 [[TMP0]], i32* [[VLA]]) 
#[[ATTR3:[0-9]+]] // CHECK15-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK15: omp_offload.cont: -// CHECK15-NEXT: [[TMP33:%.*]] = load i32, i32* [[N]], align 4 -// CHECK15-NEXT: store i32 [[TMP33]], i32* [[N_CASTED3]], align 4 -// CHECK15-NEXT: [[TMP34:%.*]] = load i32, i32* [[N_CASTED3]], align 4 -// CHECK15-NEXT: [[TMP35:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK15-NEXT: [[TMP36:%.*]] = sext i32 [[TMP35]] to i64 -// CHECK15-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i32* -// CHECK15-NEXT: store i32 [[TMP34]], i32* [[TMP38]], align 4 -// CHECK15-NEXT: [[TMP39:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i32* -// CHECK15-NEXT: store i32 [[TMP34]], i32* [[TMP40]], align 4 -// CHECK15-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 0 -// CHECK15-NEXT: store i64 4, i64* [[TMP41]], align 4 -// CHECK15-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0 +// CHECK15-NEXT: [[TMP29:%.*]] = load i32, i32* [[N]], align 4 +// CHECK15-NEXT: store i32 [[TMP29]], i32* [[N_CASTED3]], align 4 +// CHECK15-NEXT: [[TMP30:%.*]] = load i32, i32* [[N_CASTED3]], align 4 +// CHECK15-NEXT: [[TMP31:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK15-NEXT: [[TMP32:%.*]] = sext i32 [[TMP31]] to i64 +// CHECK15-NEXT: [[TMP33:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 +// CHECK15-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i32* +// CHECK15-NEXT: store i32 [[TMP30]], i32* [[TMP34]], align 4 +// CHECK15-NEXT: [[TMP35:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 +// CHECK15-NEXT: [[TMP36:%.*]] = bitcast i8** [[TMP35]] to i32* +// CHECK15-NEXT: store i32 [[TMP30]], i32* [[TMP36]], align 4 +// CHECK15-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0 +// CHECK15-NEXT: store i8* null, i8** [[TMP37]], align 4 +// CHECK15-NEXT: [[TMP38:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1 +// CHECK15-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i32* +// CHECK15-NEXT: store i32 [[TMP0]], i32* [[TMP39]], align 4 +// CHECK15-NEXT: [[TMP40:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 1 +// CHECK15-NEXT: [[TMP41:%.*]] = bitcast i8** [[TMP40]] to i32* +// CHECK15-NEXT: store i32 [[TMP0]], i32* [[TMP41]], align 4 +// CHECK15-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1 // CHECK15-NEXT: store i8* null, i8** [[TMP42]], align 4 -// CHECK15-NEXT: [[TMP43:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1 -// CHECK15-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32* -// CHECK15-NEXT: store i32 [[TMP0]], i32* [[TMP44]], align 4 -// CHECK15-NEXT: [[TMP45:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 1 -// CHECK15-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32* -// CHECK15-NEXT: store i32 [[TMP0]], i32* [[TMP46]], align 4 -// CHECK15-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 1 -// CHECK15-NEXT: store i64 4, i64* [[TMP47]], align 4 -// CHECK15-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x 
i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1 -// CHECK15-NEXT: store i8* null, i8** [[TMP48]], align 4 -// CHECK15-NEXT: [[TMP49:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2 -// CHECK15-NEXT: [[TMP50:%.*]] = bitcast i8** [[TMP49]] to i32** -// CHECK15-NEXT: store i32* [[VLA]], i32** [[TMP50]], align 4 -// CHECK15-NEXT: [[TMP51:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 2 -// CHECK15-NEXT: [[TMP52:%.*]] = bitcast i8** [[TMP51]] to i32** -// CHECK15-NEXT: store i32* [[VLA]], i32** [[TMP52]], align 4 -// CHECK15-NEXT: [[TMP53:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 2 -// CHECK15-NEXT: store i64 [[TMP36]], i64* [[TMP53]], align 4 -// CHECK15-NEXT: [[TMP54:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 2 -// CHECK15-NEXT: store i8* null, i8** [[TMP54]], align 4 -// CHECK15-NEXT: [[TMP55:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP56:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP57:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP58:%.*]] = load i32, i32* [[N]], align 4 -// CHECK15-NEXT: store i32 [[TMP58]], i32* [[DOTCAPTURE_EXPR_9]], align 4 -// CHECK15-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 -// CHECK15-NEXT: [[SUB11:%.*]] = sub nsw i32 [[TMP59]], 0 -// CHECK15-NEXT: [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1 -// CHECK15-NEXT: [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1 -// CHECK15-NEXT: store i32 [[SUB13]], i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK15-NEXT: [[TMP60:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK15-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP60]], 1 -// CHECK15-NEXT: [[TMP61:%.*]] = zext i32 [[ADD14]] to i64 -// CHECK15-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP61]]) -// CHECK15-NEXT: [[TMP62:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143.region_id, i32 3, i8** [[TMP55]], i8** [[TMP56]], i64* [[TMP57]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK15-NEXT: [[TMP63:%.*]] = icmp ne i32 [[TMP62]], 0 -// CHECK15-NEXT: br i1 [[TMP63]], label [[OMP_OFFLOAD_FAILED15:%.*]], label [[OMP_OFFLOAD_CONT16:%.*]] -// CHECK15: omp_offload.failed15: -// CHECK15-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143(i32 [[TMP34]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] -// CHECK15-NEXT: br label [[OMP_OFFLOAD_CONT16]] -// CHECK15: omp_offload.cont16: -// CHECK15-NEXT: [[TMP64:%.*]] = load i32, i32* [[M]], align 4 -// CHECK15-NEXT: store i32 [[TMP64]], i32* [[DOTCAPTURE_EXPR_17]], align 4 -// CHECK15-NEXT: [[TMP65:%.*]] = load i32, i32* [[N]], align 4 -// CHECK15-NEXT: store i32 [[TMP65]], i32* [[N_CASTED18]], align 4 -// CHECK15-NEXT: [[TMP66:%.*]] = load i32, i32* [[N_CASTED18]], align 4 -// CHECK15-NEXT: [[TMP67:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_17]], align 4 -// CHECK15-NEXT: store i32 [[TMP67]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK15-NEXT: [[TMP68:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK15-NEXT: [[TMP69:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK15-NEXT: [[TMP70:%.*]] = sext i32 
[[TMP69]] to i64 -// CHECK15-NEXT: [[TMP71:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP72:%.*]] = bitcast i8** [[TMP71]] to i32* -// CHECK15-NEXT: store i32 [[TMP66]], i32* [[TMP72]], align 4 -// CHECK15-NEXT: [[TMP73:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP74:%.*]] = bitcast i8** [[TMP73]] to i32* -// CHECK15-NEXT: store i32 [[TMP66]], i32* [[TMP74]], align 4 -// CHECK15-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 0 -// CHECK15-NEXT: store i64 4, i64* [[TMP75]], align 4 -// CHECK15-NEXT: [[TMP76:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 0 -// CHECK15-NEXT: store i8* null, i8** [[TMP76]], align 4 -// CHECK15-NEXT: [[TMP77:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 1 -// CHECK15-NEXT: [[TMP78:%.*]] = bitcast i8** [[TMP77]] to i32* -// CHECK15-NEXT: store i32 [[TMP0]], i32* [[TMP78]], align 4 -// CHECK15-NEXT: [[TMP79:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 1 -// CHECK15-NEXT: [[TMP80:%.*]] = bitcast i8** [[TMP79]] to i32* -// CHECK15-NEXT: store i32 [[TMP0]], i32* [[TMP80]], align 4 -// CHECK15-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 1 -// CHECK15-NEXT: store i64 4, i64* [[TMP81]], align 4 -// CHECK15-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 1 +// CHECK15-NEXT: [[TMP43:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2 +// CHECK15-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32** +// CHECK15-NEXT: store i32* [[VLA]], i32** [[TMP44]], align 4 +// CHECK15-NEXT: [[TMP45:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 2 +// CHECK15-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32** +// CHECK15-NEXT: store i32* [[VLA]], i32** [[TMP46]], align 4 +// CHECK15-NEXT: store i64 [[TMP32]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 2), align 4 +// CHECK15-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 2 +// CHECK15-NEXT: store i8* null, i8** [[TMP47]], align 4 +// CHECK15-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 +// CHECK15-NEXT: [[TMP49:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 +// CHECK15-NEXT: [[TMP50:%.*]] = load i32, i32* [[N]], align 4 +// CHECK15-NEXT: store i32 [[TMP50]], i32* [[DOTCAPTURE_EXPR_8]], align 4 +// CHECK15-NEXT: [[TMP51:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_8]], align 4 +// CHECK15-NEXT: [[SUB10:%.*]] = sub nsw i32 [[TMP51]], 0 +// CHECK15-NEXT: [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1 +// CHECK15-NEXT: [[SUB12:%.*]] = sub nsw i32 [[DIV11]], 1 +// CHECK15-NEXT: store i32 [[SUB12]], i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK15-NEXT: [[TMP52:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK15-NEXT: [[ADD13:%.*]] = add nsw i32 [[TMP52]], 1 +// CHECK15-NEXT: [[TMP53:%.*]] = zext i32 [[ADD13]] to i64 +// CHECK15-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP53]]) +// CHECK15-NEXT: [[TMP54:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* 
@.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143.region_id, i32 3, i8** [[TMP48]], i8** [[TMP49]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK15-NEXT: [[TMP55:%.*]] = icmp ne i32 [[TMP54]], 0 +// CHECK15-NEXT: br i1 [[TMP55]], label [[OMP_OFFLOAD_FAILED14:%.*]], label [[OMP_OFFLOAD_CONT15:%.*]] +// CHECK15: omp_offload.failed14: +// CHECK15-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143(i32 [[TMP30]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] +// CHECK15-NEXT: br label [[OMP_OFFLOAD_CONT15]] +// CHECK15: omp_offload.cont15: +// CHECK15-NEXT: [[TMP56:%.*]] = load i32, i32* [[M]], align 4 +// CHECK15-NEXT: store i32 [[TMP56]], i32* [[DOTCAPTURE_EXPR_16]], align 4 +// CHECK15-NEXT: [[TMP57:%.*]] = load i32, i32* [[N]], align 4 +// CHECK15-NEXT: store i32 [[TMP57]], i32* [[N_CASTED17]], align 4 +// CHECK15-NEXT: [[TMP58:%.*]] = load i32, i32* [[N_CASTED17]], align 4 +// CHECK15-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_16]], align 4 +// CHECK15-NEXT: store i32 [[TMP59]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 +// CHECK15-NEXT: [[TMP60:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 +// CHECK15-NEXT: [[TMP61:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK15-NEXT: [[TMP62:%.*]] = sext i32 [[TMP61]] to i64 +// CHECK15-NEXT: [[TMP63:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 0 +// CHECK15-NEXT: [[TMP64:%.*]] = bitcast i8** [[TMP63]] to i32* +// CHECK15-NEXT: store i32 [[TMP58]], i32* [[TMP64]], align 4 +// CHECK15-NEXT: [[TMP65:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 0 +// CHECK15-NEXT: [[TMP66:%.*]] = bitcast i8** [[TMP65]] to i32* +// CHECK15-NEXT: store i32 [[TMP58]], i32* [[TMP66]], align 4 +// CHECK15-NEXT: [[TMP67:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 0 +// CHECK15-NEXT: store i8* null, i8** [[TMP67]], align 4 +// CHECK15-NEXT: [[TMP68:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 1 +// CHECK15-NEXT: [[TMP69:%.*]] = bitcast i8** [[TMP68]] to i32* +// CHECK15-NEXT: store i32 [[TMP0]], i32* [[TMP69]], align 4 +// CHECK15-NEXT: [[TMP70:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 1 +// CHECK15-NEXT: [[TMP71:%.*]] = bitcast i8** [[TMP70]] to i32* +// CHECK15-NEXT: store i32 [[TMP0]], i32* [[TMP71]], align 4 +// CHECK15-NEXT: [[TMP72:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 1 +// CHECK15-NEXT: store i8* null, i8** [[TMP72]], align 4 +// CHECK15-NEXT: [[TMP73:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 2 +// CHECK15-NEXT: [[TMP74:%.*]] = bitcast i8** [[TMP73]] to i32** +// CHECK15-NEXT: store i32* [[VLA]], i32** [[TMP74]], align 4 +// CHECK15-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 2 +// CHECK15-NEXT: [[TMP76:%.*]] = bitcast i8** [[TMP75]] to i32** +// CHECK15-NEXT: store i32* [[VLA]], i32** [[TMP76]], align 4 +// CHECK15-NEXT: store i64 [[TMP62]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 2), align 4 +// CHECK15-NEXT: [[TMP77:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 2 +// CHECK15-NEXT: store i8* null, i8** 
[[TMP77]], align 4 +// CHECK15-NEXT: [[TMP78:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 3 +// CHECK15-NEXT: [[TMP79:%.*]] = bitcast i8** [[TMP78]] to i32* +// CHECK15-NEXT: store i32 [[TMP60]], i32* [[TMP79]], align 4 +// CHECK15-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 3 +// CHECK15-NEXT: [[TMP81:%.*]] = bitcast i8** [[TMP80]] to i32* +// CHECK15-NEXT: store i32 [[TMP60]], i32* [[TMP81]], align 4 +// CHECK15-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 3 // CHECK15-NEXT: store i8* null, i8** [[TMP82]], align 4 -// CHECK15-NEXT: [[TMP83:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 2 -// CHECK15-NEXT: [[TMP84:%.*]] = bitcast i8** [[TMP83]] to i32** -// CHECK15-NEXT: store i32* [[VLA]], i32** [[TMP84]], align 4 -// CHECK15-NEXT: [[TMP85:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 2 -// CHECK15-NEXT: [[TMP86:%.*]] = bitcast i8** [[TMP85]] to i32** -// CHECK15-NEXT: store i32* [[VLA]], i32** [[TMP86]], align 4 -// CHECK15-NEXT: [[TMP87:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 2 -// CHECK15-NEXT: store i64 [[TMP70]], i64* [[TMP87]], align 4 -// CHECK15-NEXT: [[TMP88:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 2 -// CHECK15-NEXT: store i8* null, i8** [[TMP88]], align 4 -// CHECK15-NEXT: [[TMP89:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 3 -// CHECK15-NEXT: [[TMP90:%.*]] = bitcast i8** [[TMP89]] to i32* -// CHECK15-NEXT: store i32 [[TMP68]], i32* [[TMP90]], align 4 -// CHECK15-NEXT: [[TMP91:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 3 -// CHECK15-NEXT: [[TMP92:%.*]] = bitcast i8** [[TMP91]] to i32* -// CHECK15-NEXT: store i32 [[TMP68]], i32* [[TMP92]], align 4 -// CHECK15-NEXT: [[TMP93:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 3 -// CHECK15-NEXT: store i64 4, i64* [[TMP93]], align 4 -// CHECK15-NEXT: [[TMP94:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 3 -// CHECK15-NEXT: store i8* null, i8** [[TMP94]], align 4 -// CHECK15-NEXT: [[TMP95:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP96:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP97:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP98:%.*]] = load i32, i32* [[N]], align 4 -// CHECK15-NEXT: store i32 [[TMP98]], i32* [[DOTCAPTURE_EXPR_24]], align 4 -// CHECK15-NEXT: [[TMP99:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_24]], align 4 -// CHECK15-NEXT: [[SUB26:%.*]] = sub nsw i32 [[TMP99]], 0 -// CHECK15-NEXT: [[DIV27:%.*]] = sdiv i32 [[SUB26]], 1 -// CHECK15-NEXT: [[SUB28:%.*]] = sub nsw i32 [[DIV27]], 1 -// CHECK15-NEXT: store i32 [[SUB28]], i32* [[DOTCAPTURE_EXPR_25]], align 4 -// CHECK15-NEXT: [[TMP100:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_25]], align 4 -// CHECK15-NEXT: [[ADD29:%.*]] = add nsw i32 [[TMP100]], 1 -// CHECK15-NEXT: [[TMP101:%.*]] = zext i32 [[ADD29]] to i64 -// CHECK15-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP101]]) -// CHECK15-NEXT: [[TMP102:%.*]] = call i32 
@__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147.region_id, i32 4, i8** [[TMP95]], i8** [[TMP96]], i64* [[TMP97]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.7, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK15-NEXT: [[TMP103:%.*]] = icmp ne i32 [[TMP102]], 0 -// CHECK15-NEXT: br i1 [[TMP103]], label [[OMP_OFFLOAD_FAILED30:%.*]], label [[OMP_OFFLOAD_CONT31:%.*]] -// CHECK15: omp_offload.failed30: -// CHECK15-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147(i32 [[TMP66]], i32 [[TMP0]], i32* [[VLA]], i32 [[TMP68]]) #[[ATTR3]] -// CHECK15-NEXT: br label [[OMP_OFFLOAD_CONT31]] -// CHECK15: omp_offload.cont31: -// CHECK15-NEXT: [[TMP104:%.*]] = load i32, i32* [[N]], align 4 -// CHECK15-NEXT: store i32 [[TMP104]], i32* [[N_CASTED32]], align 4 -// CHECK15-NEXT: [[TMP105:%.*]] = load i32, i32* [[N_CASTED32]], align 4 -// CHECK15-NEXT: [[TMP106:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK15-NEXT: [[TMP107:%.*]] = sext i32 [[TMP106]] to i64 -// CHECK15-NEXT: [[TMP108:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP109:%.*]] = bitcast i8** [[TMP108]] to i32* -// CHECK15-NEXT: store i32 [[TMP105]], i32* [[TMP109]], align 4 -// CHECK15-NEXT: [[TMP110:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP111:%.*]] = bitcast i8** [[TMP110]] to i32* -// CHECK15-NEXT: store i32 [[TMP105]], i32* [[TMP111]], align 4 -// CHECK15-NEXT: [[TMP112:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES36]], i32 0, i32 0 -// CHECK15-NEXT: store i64 4, i64* [[TMP112]], align 4 -// CHECK15-NEXT: [[TMP113:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS35]], i32 0, i32 0 -// CHECK15-NEXT: store i8* null, i8** [[TMP113]], align 4 -// CHECK15-NEXT: [[TMP114:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 1 -// CHECK15-NEXT: [[TMP115:%.*]] = bitcast i8** [[TMP114]] to i32* -// CHECK15-NEXT: store i32 [[TMP0]], i32* [[TMP115]], align 4 -// CHECK15-NEXT: [[TMP116:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 1 -// CHECK15-NEXT: [[TMP117:%.*]] = bitcast i8** [[TMP116]] to i32* -// CHECK15-NEXT: store i32 [[TMP0]], i32* [[TMP117]], align 4 -// CHECK15-NEXT: [[TMP118:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES36]], i32 0, i32 1 -// CHECK15-NEXT: store i64 4, i64* [[TMP118]], align 4 -// CHECK15-NEXT: [[TMP119:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS35]], i32 0, i32 1 -// CHECK15-NEXT: store i8* null, i8** [[TMP119]], align 4 -// CHECK15-NEXT: [[TMP120:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 2 -// CHECK15-NEXT: [[TMP121:%.*]] = bitcast i8** [[TMP120]] to i32** -// CHECK15-NEXT: store i32* [[VLA]], i32** [[TMP121]], align 4 -// CHECK15-NEXT: [[TMP122:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 2 -// CHECK15-NEXT: [[TMP123:%.*]] = bitcast i8** [[TMP122]] to i32** -// CHECK15-NEXT: store i32* [[VLA]], i32** [[TMP123]], align 4 -// CHECK15-NEXT: [[TMP124:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES36]], i32 0, i32 2 -// CHECK15-NEXT: store i64 [[TMP107]], i64* [[TMP124]], align 4 -// CHECK15-NEXT: [[TMP125:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* 
[[DOTOFFLOAD_MAPPERS35]], i32 0, i32 2 -// CHECK15-NEXT: store i8* null, i8** [[TMP125]], align 4 -// CHECK15-NEXT: [[TMP126:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP127:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP128:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES36]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP129:%.*]] = load i32, i32* [[N]], align 4 -// CHECK15-NEXT: store i32 [[TMP129]], i32* [[DOTCAPTURE_EXPR_38]], align 4 -// CHECK15-NEXT: [[TMP130:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_38]], align 4 -// CHECK15-NEXT: [[SUB40:%.*]] = sub nsw i32 [[TMP130]], 0 -// CHECK15-NEXT: [[DIV41:%.*]] = sdiv i32 [[SUB40]], 1 -// CHECK15-NEXT: [[SUB42:%.*]] = sub nsw i32 [[DIV41]], 1 -// CHECK15-NEXT: store i32 [[SUB42]], i32* [[DOTCAPTURE_EXPR_39]], align 4 -// CHECK15-NEXT: [[TMP131:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_39]], align 4 -// CHECK15-NEXT: [[ADD43:%.*]] = add nsw i32 [[TMP131]], 1 -// CHECK15-NEXT: [[TMP132:%.*]] = zext i32 [[ADD43]] to i64 -// CHECK15-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP132]]) -// CHECK15-NEXT: [[TMP133:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151.region_id, i32 3, i8** [[TMP126]], i8** [[TMP127]], i64* [[TMP128]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK15-NEXT: [[TMP134:%.*]] = icmp ne i32 [[TMP133]], 0 -// CHECK15-NEXT: br i1 [[TMP134]], label [[OMP_OFFLOAD_FAILED44:%.*]], label [[OMP_OFFLOAD_CONT45:%.*]] -// CHECK15: omp_offload.failed44: -// CHECK15-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151(i32 [[TMP105]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] -// CHECK15-NEXT: br label [[OMP_OFFLOAD_CONT45]] -// CHECK15: omp_offload.cont45: -// CHECK15-NEXT: [[TMP135:%.*]] = load i32, i32* [[M]], align 4 -// CHECK15-NEXT: store i32 [[TMP135]], i32* [[DOTCAPTURE_EXPR_46]], align 4 -// CHECK15-NEXT: [[TMP136:%.*]] = load i32, i32* [[N]], align 4 -// CHECK15-NEXT: store i32 [[TMP136]], i32* [[N_CASTED47]], align 4 -// CHECK15-NEXT: [[TMP137:%.*]] = load i32, i32* [[N_CASTED47]], align 4 -// CHECK15-NEXT: [[TMP138:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_46]], align 4 -// CHECK15-NEXT: store i32 [[TMP138]], i32* [[DOTCAPTURE_EXPR__CASTED48]], align 4 -// CHECK15-NEXT: [[TMP139:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED48]], align 4 -// CHECK15-NEXT: [[TMP140:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK15-NEXT: [[TMP141:%.*]] = sext i32 [[TMP140]] to i64 -// CHECK15-NEXT: [[TMP142:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS49]], i32 0, i32 0 +// CHECK15-NEXT: [[TMP83:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 0 +// CHECK15-NEXT: [[TMP84:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 0 +// CHECK15-NEXT: [[TMP85:%.*]] = load i32, i32* [[N]], align 4 +// CHECK15-NEXT: store i32 [[TMP85]], i32* [[DOTCAPTURE_EXPR_22]], align 4 +// CHECK15-NEXT: [[TMP86:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_22]], align 4 +// CHECK15-NEXT: [[SUB24:%.*]] = sub nsw i32 [[TMP86]], 0 +// CHECK15-NEXT: [[DIV25:%.*]] = sdiv i32 [[SUB24]], 1 +// CHECK15-NEXT: [[SUB26:%.*]] = sub nsw i32 [[DIV25]], 1 +// CHECK15-NEXT: store i32 [[SUB26]], i32* 
[[DOTCAPTURE_EXPR_23]], align 4 +// CHECK15-NEXT: [[TMP87:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_23]], align 4 +// CHECK15-NEXT: [[ADD27:%.*]] = add nsw i32 [[TMP87]], 1 +// CHECK15-NEXT: [[TMP88:%.*]] = zext i32 [[ADD27]] to i64 +// CHECK15-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP88]]) +// CHECK15-NEXT: [[TMP89:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147.region_id, i32 4, i8** [[TMP83]], i8** [[TMP84]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK15-NEXT: [[TMP90:%.*]] = icmp ne i32 [[TMP89]], 0 +// CHECK15-NEXT: br i1 [[TMP90]], label [[OMP_OFFLOAD_FAILED28:%.*]], label [[OMP_OFFLOAD_CONT29:%.*]] +// CHECK15: omp_offload.failed28: +// CHECK15-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147(i32 [[TMP58]], i32 [[TMP0]], i32* [[VLA]], i32 [[TMP60]]) #[[ATTR3]] +// CHECK15-NEXT: br label [[OMP_OFFLOAD_CONT29]] +// CHECK15: omp_offload.cont29: +// CHECK15-NEXT: [[TMP91:%.*]] = load i32, i32* [[N]], align 4 +// CHECK15-NEXT: store i32 [[TMP91]], i32* [[N_CASTED30]], align 4 +// CHECK15-NEXT: [[TMP92:%.*]] = load i32, i32* [[N_CASTED30]], align 4 +// CHECK15-NEXT: [[TMP93:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK15-NEXT: [[TMP94:%.*]] = sext i32 [[TMP93]] to i64 +// CHECK15-NEXT: [[TMP95:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS31]], i32 0, i32 0 +// CHECK15-NEXT: [[TMP96:%.*]] = bitcast i8** [[TMP95]] to i32* +// CHECK15-NEXT: store i32 [[TMP92]], i32* [[TMP96]], align 4 +// CHECK15-NEXT: [[TMP97:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS32]], i32 0, i32 0 +// CHECK15-NEXT: [[TMP98:%.*]] = bitcast i8** [[TMP97]] to i32* +// CHECK15-NEXT: store i32 [[TMP92]], i32* [[TMP98]], align 4 +// CHECK15-NEXT: [[TMP99:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS33]], i32 0, i32 0 +// CHECK15-NEXT: store i8* null, i8** [[TMP99]], align 4 +// CHECK15-NEXT: [[TMP100:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS31]], i32 0, i32 1 +// CHECK15-NEXT: [[TMP101:%.*]] = bitcast i8** [[TMP100]] to i32* +// CHECK15-NEXT: store i32 [[TMP0]], i32* [[TMP101]], align 4 +// CHECK15-NEXT: [[TMP102:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS32]], i32 0, i32 1 +// CHECK15-NEXT: [[TMP103:%.*]] = bitcast i8** [[TMP102]] to i32* +// CHECK15-NEXT: store i32 [[TMP0]], i32* [[TMP103]], align 4 +// CHECK15-NEXT: [[TMP104:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS33]], i32 0, i32 1 +// CHECK15-NEXT: store i8* null, i8** [[TMP104]], align 4 +// CHECK15-NEXT: [[TMP105:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS31]], i32 0, i32 2 +// CHECK15-NEXT: [[TMP106:%.*]] = bitcast i8** [[TMP105]] to i32** +// CHECK15-NEXT: store i32* [[VLA]], i32** [[TMP106]], align 4 +// CHECK15-NEXT: [[TMP107:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS32]], i32 0, i32 2 +// CHECK15-NEXT: [[TMP108:%.*]] = bitcast i8** [[TMP107]] to i32** +// CHECK15-NEXT: store i32* [[VLA]], i32** [[TMP108]], align 4 +// CHECK15-NEXT: store i64 [[TMP94]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.12, i32 0, i32 2), align 4 +// CHECK15-NEXT: [[TMP109:%.*]] = getelementptr 
inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS33]], i32 0, i32 2 +// CHECK15-NEXT: store i8* null, i8** [[TMP109]], align 4 +// CHECK15-NEXT: [[TMP110:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS31]], i32 0, i32 0 +// CHECK15-NEXT: [[TMP111:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS32]], i32 0, i32 0 +// CHECK15-NEXT: [[TMP112:%.*]] = load i32, i32* [[N]], align 4 +// CHECK15-NEXT: store i32 [[TMP112]], i32* [[DOTCAPTURE_EXPR_35]], align 4 +// CHECK15-NEXT: [[TMP113:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_35]], align 4 +// CHECK15-NEXT: [[SUB37:%.*]] = sub nsw i32 [[TMP113]], 0 +// CHECK15-NEXT: [[DIV38:%.*]] = sdiv i32 [[SUB37]], 1 +// CHECK15-NEXT: [[SUB39:%.*]] = sub nsw i32 [[DIV38]], 1 +// CHECK15-NEXT: store i32 [[SUB39]], i32* [[DOTCAPTURE_EXPR_36]], align 4 +// CHECK15-NEXT: [[TMP114:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_36]], align 4 +// CHECK15-NEXT: [[ADD40:%.*]] = add nsw i32 [[TMP114]], 1 +// CHECK15-NEXT: [[TMP115:%.*]] = zext i32 [[ADD40]] to i64 +// CHECK15-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP115]]) +// CHECK15-NEXT: [[TMP116:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151.region_id, i32 3, i8** [[TMP110]], i8** [[TMP111]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK15-NEXT: [[TMP117:%.*]] = icmp ne i32 [[TMP116]], 0 +// CHECK15-NEXT: br i1 [[TMP117]], label [[OMP_OFFLOAD_FAILED41:%.*]], label [[OMP_OFFLOAD_CONT42:%.*]] +// CHECK15: omp_offload.failed41: +// CHECK15-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151(i32 [[TMP92]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] +// CHECK15-NEXT: br label [[OMP_OFFLOAD_CONT42]] +// CHECK15: omp_offload.cont42: +// CHECK15-NEXT: [[TMP118:%.*]] = load i32, i32* [[M]], align 4 +// CHECK15-NEXT: store i32 [[TMP118]], i32* [[DOTCAPTURE_EXPR_43]], align 4 +// CHECK15-NEXT: [[TMP119:%.*]] = load i32, i32* [[N]], align 4 +// CHECK15-NEXT: store i32 [[TMP119]], i32* [[N_CASTED44]], align 4 +// CHECK15-NEXT: [[TMP120:%.*]] = load i32, i32* [[N_CASTED44]], align 4 +// CHECK15-NEXT: [[TMP121:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_43]], align 4 +// CHECK15-NEXT: store i32 [[TMP121]], i32* [[DOTCAPTURE_EXPR__CASTED45]], align 4 +// CHECK15-NEXT: [[TMP122:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED45]], align 4 +// CHECK15-NEXT: [[TMP123:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK15-NEXT: [[TMP124:%.*]] = sext i32 [[TMP123]] to i64 +// CHECK15-NEXT: [[TMP125:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS46]], i32 0, i32 0 +// CHECK15-NEXT: [[TMP126:%.*]] = bitcast i8** [[TMP125]] to i32* +// CHECK15-NEXT: store i32 [[TMP120]], i32* [[TMP126]], align 4 +// CHECK15-NEXT: [[TMP127:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS47]], i32 0, i32 0 +// CHECK15-NEXT: [[TMP128:%.*]] = bitcast i8** [[TMP127]] to i32* +// CHECK15-NEXT: store i32 [[TMP120]], i32* [[TMP128]], align 4 +// CHECK15-NEXT: [[TMP129:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS48]], i32 0, i32 0 +// CHECK15-NEXT: store i8* null, i8** [[TMP129]], align 4 +// CHECK15-NEXT: [[TMP130:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS46]], i32 0, i32 1 +// CHECK15-NEXT: 
[[TMP131:%.*]] = bitcast i8** [[TMP130]] to i32* +// CHECK15-NEXT: store i32 [[TMP0]], i32* [[TMP131]], align 4 +// CHECK15-NEXT: [[TMP132:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS47]], i32 0, i32 1 +// CHECK15-NEXT: [[TMP133:%.*]] = bitcast i8** [[TMP132]] to i32* +// CHECK15-NEXT: store i32 [[TMP0]], i32* [[TMP133]], align 4 +// CHECK15-NEXT: [[TMP134:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS48]], i32 0, i32 1 +// CHECK15-NEXT: store i8* null, i8** [[TMP134]], align 4 +// CHECK15-NEXT: [[TMP135:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS46]], i32 0, i32 2 +// CHECK15-NEXT: [[TMP136:%.*]] = bitcast i8** [[TMP135]] to i32** +// CHECK15-NEXT: store i32* [[VLA]], i32** [[TMP136]], align 4 +// CHECK15-NEXT: [[TMP137:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS47]], i32 0, i32 2 +// CHECK15-NEXT: [[TMP138:%.*]] = bitcast i8** [[TMP137]] to i32** +// CHECK15-NEXT: store i32* [[VLA]], i32** [[TMP138]], align 4 +// CHECK15-NEXT: store i64 [[TMP124]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.16, i32 0, i32 2), align 4 +// CHECK15-NEXT: [[TMP139:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS48]], i32 0, i32 2 +// CHECK15-NEXT: store i8* null, i8** [[TMP139]], align 4 +// CHECK15-NEXT: [[TMP140:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS46]], i32 0, i32 3 +// CHECK15-NEXT: [[TMP141:%.*]] = bitcast i8** [[TMP140]] to i32* +// CHECK15-NEXT: store i32 [[TMP122]], i32* [[TMP141]], align 4 +// CHECK15-NEXT: [[TMP142:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS47]], i32 0, i32 3 // CHECK15-NEXT: [[TMP143:%.*]] = bitcast i8** [[TMP142]] to i32* -// CHECK15-NEXT: store i32 [[TMP137]], i32* [[TMP143]], align 4 -// CHECK15-NEXT: [[TMP144:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS50]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP145:%.*]] = bitcast i8** [[TMP144]] to i32* -// CHECK15-NEXT: store i32 [[TMP137]], i32* [[TMP145]], align 4 -// CHECK15-NEXT: [[TMP146:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES52]], i32 0, i32 0 -// CHECK15-NEXT: store i64 4, i64* [[TMP146]], align 4 -// CHECK15-NEXT: [[TMP147:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS51]], i32 0, i32 0 -// CHECK15-NEXT: store i8* null, i8** [[TMP147]], align 4 -// CHECK15-NEXT: [[TMP148:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS49]], i32 0, i32 1 -// CHECK15-NEXT: [[TMP149:%.*]] = bitcast i8** [[TMP148]] to i32* -// CHECK15-NEXT: store i32 [[TMP0]], i32* [[TMP149]], align 4 -// CHECK15-NEXT: [[TMP150:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS50]], i32 0, i32 1 -// CHECK15-NEXT: [[TMP151:%.*]] = bitcast i8** [[TMP150]] to i32* -// CHECK15-NEXT: store i32 [[TMP0]], i32* [[TMP151]], align 4 -// CHECK15-NEXT: [[TMP152:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES52]], i32 0, i32 1 -// CHECK15-NEXT: store i64 4, i64* [[TMP152]], align 4 -// CHECK15-NEXT: [[TMP153:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS51]], i32 0, i32 1 -// CHECK15-NEXT: store i8* null, i8** [[TMP153]], align 4 -// CHECK15-NEXT: [[TMP154:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS49]], i32 0, i32 2 -// CHECK15-NEXT: [[TMP155:%.*]] = bitcast i8** [[TMP154]] to i32** -// CHECK15-NEXT: store i32* [[VLA]], i32** [[TMP155]], align 4 -// 
CHECK15-NEXT: [[TMP156:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS50]], i32 0, i32 2 -// CHECK15-NEXT: [[TMP157:%.*]] = bitcast i8** [[TMP156]] to i32** -// CHECK15-NEXT: store i32* [[VLA]], i32** [[TMP157]], align 4 -// CHECK15-NEXT: [[TMP158:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES52]], i32 0, i32 2 -// CHECK15-NEXT: store i64 [[TMP141]], i64* [[TMP158]], align 4 -// CHECK15-NEXT: [[TMP159:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS51]], i32 0, i32 2 -// CHECK15-NEXT: store i8* null, i8** [[TMP159]], align 4 -// CHECK15-NEXT: [[TMP160:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS49]], i32 0, i32 3 -// CHECK15-NEXT: [[TMP161:%.*]] = bitcast i8** [[TMP160]] to i32* -// CHECK15-NEXT: store i32 [[TMP139]], i32* [[TMP161]], align 4 -// CHECK15-NEXT: [[TMP162:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS50]], i32 0, i32 3 -// CHECK15-NEXT: [[TMP163:%.*]] = bitcast i8** [[TMP162]] to i32* -// CHECK15-NEXT: store i32 [[TMP139]], i32* [[TMP163]], align 4 -// CHECK15-NEXT: [[TMP164:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES52]], i32 0, i32 3 -// CHECK15-NEXT: store i64 4, i64* [[TMP164]], align 4 -// CHECK15-NEXT: [[TMP165:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS51]], i32 0, i32 3 -// CHECK15-NEXT: store i8* null, i8** [[TMP165]], align 4 -// CHECK15-NEXT: [[TMP166:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS49]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP167:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS50]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP168:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES52]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP169:%.*]] = load i32, i32* [[N]], align 4 -// CHECK15-NEXT: store i32 [[TMP169]], i32* [[DOTCAPTURE_EXPR_54]], align 4 -// CHECK15-NEXT: [[TMP170:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_54]], align 4 -// CHECK15-NEXT: [[SUB56:%.*]] = sub nsw i32 [[TMP170]], 0 -// CHECK15-NEXT: [[DIV57:%.*]] = sdiv i32 [[SUB56]], 1 -// CHECK15-NEXT: [[SUB58:%.*]] = sub nsw i32 [[DIV57]], 1 -// CHECK15-NEXT: store i32 [[SUB58]], i32* [[DOTCAPTURE_EXPR_55]], align 4 -// CHECK15-NEXT: [[TMP171:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_55]], align 4 -// CHECK15-NEXT: [[ADD59:%.*]] = add nsw i32 [[TMP171]], 1 -// CHECK15-NEXT: [[TMP172:%.*]] = zext i32 [[ADD59]] to i64 -// CHECK15-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP172]]) -// CHECK15-NEXT: [[TMP173:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155.region_id, i32 4, i8** [[TMP166]], i8** [[TMP167]], i64* [[TMP168]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK15-NEXT: [[TMP174:%.*]] = icmp ne i32 [[TMP173]], 0 -// CHECK15-NEXT: br i1 [[TMP174]], label [[OMP_OFFLOAD_FAILED60:%.*]], label [[OMP_OFFLOAD_CONT61:%.*]] -// CHECK15: omp_offload.failed60: -// CHECK15-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155(i32 [[TMP137]], i32 [[TMP0]], i32* [[VLA]], i32 [[TMP139]]) #[[ATTR3]] -// CHECK15-NEXT: br label [[OMP_OFFLOAD_CONT61]] -// CHECK15: omp_offload.cont61: -// CHECK15-NEXT: [[TMP175:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 -// CHECK15-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef 
[[TMP175]]) +// CHECK15-NEXT: store i32 [[TMP122]], i32* [[TMP143]], align 4 +// CHECK15-NEXT: [[TMP144:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS48]], i32 0, i32 3 +// CHECK15-NEXT: store i8* null, i8** [[TMP144]], align 4 +// CHECK15-NEXT: [[TMP145:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS46]], i32 0, i32 0 +// CHECK15-NEXT: [[TMP146:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS47]], i32 0, i32 0 +// CHECK15-NEXT: [[TMP147:%.*]] = load i32, i32* [[N]], align 4 +// CHECK15-NEXT: store i32 [[TMP147]], i32* [[DOTCAPTURE_EXPR_50]], align 4 +// CHECK15-NEXT: [[TMP148:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_50]], align 4 +// CHECK15-NEXT: [[SUB52:%.*]] = sub nsw i32 [[TMP148]], 0 +// CHECK15-NEXT: [[DIV53:%.*]] = sdiv i32 [[SUB52]], 1 +// CHECK15-NEXT: [[SUB54:%.*]] = sub nsw i32 [[DIV53]], 1 +// CHECK15-NEXT: store i32 [[SUB54]], i32* [[DOTCAPTURE_EXPR_51]], align 4 +// CHECK15-NEXT: [[TMP149:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_51]], align 4 +// CHECK15-NEXT: [[ADD55:%.*]] = add nsw i32 [[TMP149]], 1 +// CHECK15-NEXT: [[TMP150:%.*]] = zext i32 [[ADD55]] to i64 +// CHECK15-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP150]]) +// CHECK15-NEXT: [[TMP151:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155.region_id, i32 4, i8** [[TMP145]], i8** [[TMP146]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.16, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK15-NEXT: [[TMP152:%.*]] = icmp ne i32 [[TMP151]], 0 +// CHECK15-NEXT: br i1 [[TMP152]], label [[OMP_OFFLOAD_FAILED56:%.*]], label [[OMP_OFFLOAD_CONT57:%.*]] +// CHECK15: omp_offload.failed56: +// CHECK15-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155(i32 [[TMP120]], i32 [[TMP0]], i32* [[VLA]], i32 [[TMP122]]) #[[ATTR3]] +// CHECK15-NEXT: br label [[OMP_OFFLOAD_CONT57]] +// CHECK15: omp_offload.cont57: +// CHECK15-NEXT: [[TMP153:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 +// CHECK15-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP153]]) // CHECK15-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK15-NEXT: [[TMP176:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK15-NEXT: call void @llvm.stackrestore(i8* [[TMP176]]) -// CHECK15-NEXT: [[TMP177:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK15-NEXT: ret i32 [[TMP177]] +// CHECK15-NEXT: [[TMP154:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK15-NEXT: call void @llvm.stackrestore(i8* [[TMP154]]) +// CHECK15-NEXT: [[TMP155:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK15-NEXT: ret i32 [[TMP155]] // // // CHECK15-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139 @@ -15022,11 +14905,11 @@ // CHECK15-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK15-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK15-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP5]]) +// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP5]]) // CHECK15-NEXT: ret void // // -// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..5 +// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..6 // CHECK15-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK15-NEXT: entry: // CHECK15-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -15107,7 +14990,7 @@ // CHECK15-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4, !llvm.access.group !29 // CHECK15-NEXT: store i32 [[TMP20]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !29 // CHECK15-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !29 -// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*, i32)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32 [[TMP16]], i32 [[TMP17]], i32 [[TMP19]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP21]]), !llvm.access.group !29 +// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*, i32)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i32 [[TMP16]], i32 [[TMP17]], i32 [[TMP19]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP21]]), !llvm.access.group !29 // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK15: omp.inner.for.inc: // CHECK15-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29 @@ -15161,7 +15044,7 @@ // CHECK15-NEXT: ret void // // -// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..6 +// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..7 // CHECK15-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32 noundef [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK15-NEXT: entry: // CHECK15-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -15291,11 +15174,11 @@ // CHECK15-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4 // CHECK15-NEXT: store i32 [[TMP2]], i32* [[N_CASTED]], align 4 // CHECK15-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4 -// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*)* @.omp_outlined..8 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]]) +// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]]) // CHECK15-NEXT: ret void // // -// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..8 +// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..10 // CHECK15-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] { // CHECK15-NEXT: entry: // CHECK15-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -15368,7 +15251,7 @@ // CHECK15-NEXT: [[TMP17:%.*]] = load i32, i32* [[N_ADDR]], align 4, !llvm.access.group !35 // CHECK15-NEXT: store i32 [[TMP17]], i32* [[N_CASTED]], align 4, !llvm.access.group !35 // CHECK15-NEXT: [[TMP18:%.*]] = load i32, i32* [[N_CASTED]], align 4, !llvm.access.group !35 -// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), i32 [[TMP15]], i32 [[TMP16]], i32 [[TMP18]], i32 [[TMP0]], i32* [[TMP1]]), !llvm.access.group !35 +// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP15]], i32 [[TMP16]], i32 [[TMP18]], i32 [[TMP0]], i32* [[TMP1]]), !llvm.access.group !35 // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK15: omp.inner.for.inc: // CHECK15-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35 @@ -15399,7 +15282,7 @@ // CHECK15-NEXT: ret void // // -// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..9 +// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..11 // CHECK15-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32 noundef [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] { // CHECK15-NEXT: entry: // CHECK15-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -15529,11 +15412,11 @@ // CHECK15-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK15-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK15-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP5]]) +// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP5]]) // CHECK15-NEXT: ret void // // -// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..14 // CHECK15-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK15-NEXT: entry: // CHECK15-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -15612,7 +15495,7 @@ // CHECK15-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4, !llvm.access.group !41 // CHECK15-NEXT: store i32 [[TMP19]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !41 // CHECK15-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !41 -// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*, i32)* @.omp_outlined..12 to void (i32*, i32*, ...)*), i32 [[TMP15]], i32 [[TMP16]], i32 [[TMP18]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP20]]), !llvm.access.group !41 +// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*, i32)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i32 [[TMP15]], i32 [[TMP16]], i32 [[TMP18]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP20]]), !llvm.access.group !41 // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK15: omp.inner.for.inc: // CHECK15-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !41 @@ -15643,7 +15526,7 @@ // CHECK15-NEXT: ret void // // -// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..12 +// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..15 // CHECK15-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32 noundef [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK15-NEXT: entry: // CHECK15-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -15798,7 +15681,7 @@ // CHECK15-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK15-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK15-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK15-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l112.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK15-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* 
@.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l112.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.20, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.21, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK15-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK15-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK15: omp_offload.failed: @@ -15816,7 +15699,7 @@ // CHECK15-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0 // CHECK15-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0 // CHECK15-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK15-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.19, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.20, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK15-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.24, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.25, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK15-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 // CHECK15-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]] // CHECK15: omp_offload.failed5: @@ -15847,7 +15730,7 @@ // CHECK15-NEXT: [[TMP31:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0 // CHECK15-NEXT: [[TMP32:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0 // CHECK15-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK15-NEXT: [[TMP33:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l120.region_id, i32 2, i8** [[TMP31]], i8** [[TMP32]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.23, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.24, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK15-NEXT: [[TMP33:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l120.region_id, i32 2, i8** [[TMP31]], i8** [[TMP32]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.28, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.29, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK15-NEXT: [[TMP34:%.*]] = icmp ne i32 [[TMP33]], 0 // CHECK15-NEXT: br i1 [[TMP34]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]] // CHECK15: omp_offload.failed11: @@ -15865,7 +15748,7 @@ // CHECK15-NEXT: [[TMP40:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 0 // CHECK15-NEXT: 
[[TMP41:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 0 // CHECK15-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK15-NEXT: [[TMP42:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l124.region_id, i32 1, i8** [[TMP40]], i8** [[TMP41]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.27, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.28, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK15-NEXT: [[TMP42:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l124.region_id, i32 1, i8** [[TMP40]], i8** [[TMP41]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.32, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.33, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK15-NEXT: [[TMP43:%.*]] = icmp ne i32 [[TMP42]], 0 // CHECK15-NEXT: br i1 [[TMP43]], label [[OMP_OFFLOAD_FAILED17:%.*]], label [[OMP_OFFLOAD_CONT18:%.*]] // CHECK15: omp_offload.failed17: @@ -15896,7 +15779,7 @@ // CHECK15-NEXT: [[TMP57:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0 // CHECK15-NEXT: [[TMP58:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0 // CHECK15-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK15-NEXT: [[TMP59:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l128.region_id, i32 2, i8** [[TMP57]], i8** [[TMP58]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.31, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.32, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK15-NEXT: [[TMP59:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l128.region_id, i32 2, i8** [[TMP57]], i8** [[TMP58]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.36, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.37, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK15-NEXT: [[TMP60:%.*]] = icmp ne i32 [[TMP59]], 0 // CHECK15-NEXT: br i1 [[TMP60]], label [[OMP_OFFLOAD_FAILED25:%.*]], label [[OMP_OFFLOAD_CONT26:%.*]] // CHECK15: omp_offload.failed25: @@ -15912,11 +15795,11 @@ // CHECK15-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK15-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 // CHECK15-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 -// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK15-NEXT: ret void // // -// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..14 +// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..18 // CHECK15-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK15-NEXT: entry: // CHECK15-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -15962,7 +15845,7 @@ // CHECK15: omp.inner.for.body: // CHECK15-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !47 // CHECK15-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !47 -// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]), !llvm.access.group !47 +// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..19 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]), !llvm.access.group !47 // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK15: omp.inner.for.inc: // CHECK15-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47 @@ -15984,7 +15867,7 @@ // CHECK15-NEXT: ret void // // -// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..15 +// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..19 // CHECK15-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK15-NEXT: entry: // CHECK15-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -16071,11 +15954,11 @@ // CHECK15-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK15-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 // CHECK15-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 -// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..17 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..22 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK15-NEXT: ret void // // -// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..17 +// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..22 // CHECK15-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK15-NEXT: entry: // CHECK15-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -16121,7 +16004,7 @@ // CHECK15: omp.inner.for.body: // CHECK15-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !53 // CHECK15-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !53 -// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]), !llvm.access.group !53 +// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..23 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]), !llvm.access.group !53 // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK15: omp.inner.for.inc: // CHECK15-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !53 @@ -16143,7 +16026,7 @@ // CHECK15-NEXT: ret void // // -// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..18 +// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..23 // CHECK15-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK15-NEXT: entry: // CHECK15-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -16236,11 +16119,11 @@ // CHECK15-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK15-NEXT: store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK15-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..21 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP2]]) +// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..26 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP2]]) // CHECK15-NEXT: ret void // // -// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..21 +// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..26 // CHECK15-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK15-NEXT: entry: // CHECK15-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -16292,7 +16175,7 @@ // CHECK15-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4, !llvm.access.group !59 // CHECK15-NEXT: store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !59 // CHECK15-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !59 -// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..22 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]]), !llvm.access.group !59 +// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..27 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]]), !llvm.access.group !59 // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK15: omp.inner.for.inc: // CHECK15-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !59 @@ -16314,7 +16197,7 @@ // CHECK15-NEXT: ret void // // -// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..22 +// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..27 // CHECK15-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK15-NEXT: entry: // CHECK15-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -16423,11 +16306,11 @@ // CHECK15-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK15-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 // CHECK15-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 -// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..25 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..30 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK15-NEXT: ret void // // -// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..25 +// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..30 // CHECK15-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK15-NEXT: entry: // CHECK15-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -16473,7 +16356,7 @@ // CHECK15: omp.inner.for.body: // CHECK15-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !65 // CHECK15-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !65 -// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..26 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]), !llvm.access.group !65 +// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..31 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]), !llvm.access.group !65 // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK15: omp.inner.for.inc: // CHECK15-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !65 @@ -16495,7 +16378,7 @@ // CHECK15-NEXT: ret void // // -// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..26 +// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..31 // CHECK15-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK15-NEXT: entry: // CHECK15-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -16586,11 +16469,11 @@ // CHECK15-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK15-NEXT: store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK15-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..29 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP2]]) +// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..34 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP2]]) // CHECK15-NEXT: ret void // // -// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..29 +// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..34 // CHECK15-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK15-NEXT: entry: // CHECK15-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -16642,7 +16525,7 @@ // CHECK15-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4, !llvm.access.group !71 // CHECK15-NEXT: store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !71 // CHECK15-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !71 -// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..30 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]]), !llvm.access.group !71 +// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..35 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]]), !llvm.access.group !71 // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK15: omp.inner.for.inc: // CHECK15-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !71 @@ -16664,7 +16547,7 @@ // CHECK15-NEXT: ret void // // -// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..30 +// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..35 // CHECK15-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK15-NEXT: entry: // CHECK15-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -16767,7 +16650,6 @@ // CHECK16-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK16-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK16-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4 -// CHECK16-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 4 // CHECK16-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK16-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK16-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -16775,38 +16657,34 @@ // CHECK16-NEXT: [[DOTOFFLOAD_BASEPTRS4:%.*]] = alloca [3 x i8*], align 4 // CHECK16-NEXT: [[DOTOFFLOAD_PTRS5:%.*]] = alloca [3 x i8*], align 4 // CHECK16-NEXT: [[DOTOFFLOAD_MAPPERS6:%.*]] = alloca [3 x i8*], align 4 -// CHECK16-NEXT: [[DOTOFFLOAD_SIZES7:%.*]] = alloca [3 x i64], align 4 -// CHECK16-NEXT: [[_TMP8:%.*]] = alloca i32, align 4 +// CHECK16-NEXT: [[_TMP7:%.*]] = alloca i32, align 4 +// CHECK16-NEXT: [[DOTCAPTURE_EXPR_8:%.*]] = alloca i32, align 4 // CHECK16-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4 -// CHECK16-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, 
align 4 -// CHECK16-NEXT: [[DOTCAPTURE_EXPR_17:%.*]] = alloca i32, align 4 -// CHECK16-NEXT: [[N_CASTED18:%.*]] = alloca i32, align 4 +// CHECK16-NEXT: [[DOTCAPTURE_EXPR_16:%.*]] = alloca i32, align 4 +// CHECK16-NEXT: [[N_CASTED17:%.*]] = alloca i32, align 4 // CHECK16-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4 -// CHECK16-NEXT: [[DOTOFFLOAD_BASEPTRS19:%.*]] = alloca [4 x i8*], align 4 -// CHECK16-NEXT: [[DOTOFFLOAD_PTRS20:%.*]] = alloca [4 x i8*], align 4 -// CHECK16-NEXT: [[DOTOFFLOAD_MAPPERS21:%.*]] = alloca [4 x i8*], align 4 -// CHECK16-NEXT: [[DOTOFFLOAD_SIZES22:%.*]] = alloca [4 x i64], align 4 -// CHECK16-NEXT: [[_TMP23:%.*]] = alloca i32, align 4 -// CHECK16-NEXT: [[DOTCAPTURE_EXPR_24:%.*]] = alloca i32, align 4 -// CHECK16-NEXT: [[DOTCAPTURE_EXPR_25:%.*]] = alloca i32, align 4 -// CHECK16-NEXT: [[N_CASTED32:%.*]] = alloca i32, align 4 -// CHECK16-NEXT: [[DOTOFFLOAD_BASEPTRS33:%.*]] = alloca [3 x i8*], align 4 -// CHECK16-NEXT: [[DOTOFFLOAD_PTRS34:%.*]] = alloca [3 x i8*], align 4 -// CHECK16-NEXT: [[DOTOFFLOAD_MAPPERS35:%.*]] = alloca [3 x i8*], align 4 -// CHECK16-NEXT: [[DOTOFFLOAD_SIZES36:%.*]] = alloca [3 x i64], align 4 -// CHECK16-NEXT: [[_TMP37:%.*]] = alloca i32, align 4 -// CHECK16-NEXT: [[DOTCAPTURE_EXPR_38:%.*]] = alloca i32, align 4 -// CHECK16-NEXT: [[DOTCAPTURE_EXPR_39:%.*]] = alloca i32, align 4 -// CHECK16-NEXT: [[DOTCAPTURE_EXPR_46:%.*]] = alloca i32, align 4 -// CHECK16-NEXT: [[N_CASTED47:%.*]] = alloca i32, align 4 -// CHECK16-NEXT: [[DOTCAPTURE_EXPR__CASTED48:%.*]] = alloca i32, align 4 -// CHECK16-NEXT: [[DOTOFFLOAD_BASEPTRS49:%.*]] = alloca [4 x i8*], align 4 -// CHECK16-NEXT: [[DOTOFFLOAD_PTRS50:%.*]] = alloca [4 x i8*], align 4 -// CHECK16-NEXT: [[DOTOFFLOAD_MAPPERS51:%.*]] = alloca [4 x i8*], align 4 -// CHECK16-NEXT: [[DOTOFFLOAD_SIZES52:%.*]] = alloca [4 x i64], align 4 -// CHECK16-NEXT: [[_TMP53:%.*]] = alloca i32, align 4 -// CHECK16-NEXT: [[DOTCAPTURE_EXPR_54:%.*]] = alloca i32, align 4 -// CHECK16-NEXT: [[DOTCAPTURE_EXPR_55:%.*]] = alloca i32, align 4 +// CHECK16-NEXT: [[DOTOFFLOAD_BASEPTRS18:%.*]] = alloca [4 x i8*], align 4 +// CHECK16-NEXT: [[DOTOFFLOAD_PTRS19:%.*]] = alloca [4 x i8*], align 4 +// CHECK16-NEXT: [[DOTOFFLOAD_MAPPERS20:%.*]] = alloca [4 x i8*], align 4 +// CHECK16-NEXT: [[_TMP21:%.*]] = alloca i32, align 4 +// CHECK16-NEXT: [[DOTCAPTURE_EXPR_22:%.*]] = alloca i32, align 4 +// CHECK16-NEXT: [[DOTCAPTURE_EXPR_23:%.*]] = alloca i32, align 4 +// CHECK16-NEXT: [[N_CASTED30:%.*]] = alloca i32, align 4 +// CHECK16-NEXT: [[DOTOFFLOAD_BASEPTRS31:%.*]] = alloca [3 x i8*], align 4 +// CHECK16-NEXT: [[DOTOFFLOAD_PTRS32:%.*]] = alloca [3 x i8*], align 4 +// CHECK16-NEXT: [[DOTOFFLOAD_MAPPERS33:%.*]] = alloca [3 x i8*], align 4 +// CHECK16-NEXT: [[_TMP34:%.*]] = alloca i32, align 4 +// CHECK16-NEXT: [[DOTCAPTURE_EXPR_35:%.*]] = alloca i32, align 4 +// CHECK16-NEXT: [[DOTCAPTURE_EXPR_36:%.*]] = alloca i32, align 4 +// CHECK16-NEXT: [[DOTCAPTURE_EXPR_43:%.*]] = alloca i32, align 4 +// CHECK16-NEXT: [[N_CASTED44:%.*]] = alloca i32, align 4 +// CHECK16-NEXT: [[DOTCAPTURE_EXPR__CASTED45:%.*]] = alloca i32, align 4 +// CHECK16-NEXT: [[DOTOFFLOAD_BASEPTRS46:%.*]] = alloca [4 x i8*], align 4 +// CHECK16-NEXT: [[DOTOFFLOAD_PTRS47:%.*]] = alloca [4 x i8*], align 4 +// CHECK16-NEXT: [[DOTOFFLOAD_MAPPERS48:%.*]] = alloca [4 x i8*], align 4 +// CHECK16-NEXT: [[_TMP49:%.*]] = alloca i32, align 4 +// CHECK16-NEXT: [[DOTCAPTURE_EXPR_50:%.*]] = alloca i32, align 4 +// CHECK16-NEXT: [[DOTCAPTURE_EXPR_51:%.*]] = alloca i32, align 4 // 
CHECK16-NEXT: store i32 0, i32* [[RETVAL]], align 4 // CHECK16-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 // CHECK16-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 4 @@ -16828,312 +16706,278 @@ // CHECK16-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK16-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i32* // CHECK16-NEXT: store i32 [[TMP3]], i32* [[TMP9]], align 4 -// CHECK16-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK16-NEXT: store i64 4, i64* [[TMP10]], align 4 -// CHECK16-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK16-NEXT: store i8* null, i8** [[TMP11]], align 4 -// CHECK16-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK16-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32* -// CHECK16-NEXT: store i32 [[TMP0]], i32* [[TMP13]], align 4 -// CHECK16-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK16-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32* -// CHECK16-NEXT: store i32 [[TMP0]], i32* [[TMP15]], align 4 -// CHECK16-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK16-NEXT: store i64 4, i64* [[TMP16]], align 4 -// CHECK16-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK16-NEXT: store i8* null, i8** [[TMP17]], align 4 -// CHECK16-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK16-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK16-NEXT: store i8* null, i8** [[TMP10]], align 4 +// CHECK16-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK16-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i32* +// CHECK16-NEXT: store i32 [[TMP0]], i32* [[TMP12]], align 4 +// CHECK16-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK16-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i32* +// CHECK16-NEXT: store i32 [[TMP0]], i32* [[TMP14]], align 4 +// CHECK16-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK16-NEXT: store i8* null, i8** [[TMP15]], align 4 +// CHECK16-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK16-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** +// CHECK16-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 4 +// CHECK16-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK16-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** // CHECK16-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 4 -// CHECK16-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK16-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** -// CHECK16-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 4 -// CHECK16-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK16-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 4 -// CHECK16-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* 
[[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK16-NEXT: store i8* null, i8** [[TMP23]], align 4 -// CHECK16-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK16-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK16-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK16-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4 -// CHECK16-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK16-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK16-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 +// CHECK16-NEXT: store i64 [[TMP5]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 2), align 4 +// CHECK16-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK16-NEXT: store i8* null, i8** [[TMP20]], align 4 +// CHECK16-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK16-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK16-NEXT: [[TMP23:%.*]] = load i32, i32* [[N]], align 4 +// CHECK16-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK16-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK16-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK16-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK16-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK16-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK16-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK16-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1 -// CHECK16-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 -// CHECK16-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP30]]) -// CHECK16-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK16-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 -// CHECK16-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK16-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK16-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], 1 +// CHECK16-NEXT: [[TMP26:%.*]] = zext i32 [[ADD]] to i64 +// CHECK16-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP26]]) +// CHECK16-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK16-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK16-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK16: omp_offload.failed: // CHECK16-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139(i32 [[TMP3]], i32 
[[TMP0]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK16-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK16: omp_offload.cont: -// CHECK16-NEXT: [[TMP33:%.*]] = load i32, i32* [[N]], align 4 -// CHECK16-NEXT: store i32 [[TMP33]], i32* [[N_CASTED3]], align 4 -// CHECK16-NEXT: [[TMP34:%.*]] = load i32, i32* [[N_CASTED3]], align 4 -// CHECK16-NEXT: [[TMP35:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK16-NEXT: [[TMP36:%.*]] = sext i32 [[TMP35]] to i64 -// CHECK16-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 -// CHECK16-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i32* -// CHECK16-NEXT: store i32 [[TMP34]], i32* [[TMP38]], align 4 -// CHECK16-NEXT: [[TMP39:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 -// CHECK16-NEXT: [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i32* -// CHECK16-NEXT: store i32 [[TMP34]], i32* [[TMP40]], align 4 -// CHECK16-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 0 -// CHECK16-NEXT: store i64 4, i64* [[TMP41]], align 4 -// CHECK16-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0 +// CHECK16-NEXT: [[TMP29:%.*]] = load i32, i32* [[N]], align 4 +// CHECK16-NEXT: store i32 [[TMP29]], i32* [[N_CASTED3]], align 4 +// CHECK16-NEXT: [[TMP30:%.*]] = load i32, i32* [[N_CASTED3]], align 4 +// CHECK16-NEXT: [[TMP31:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK16-NEXT: [[TMP32:%.*]] = sext i32 [[TMP31]] to i64 +// CHECK16-NEXT: [[TMP33:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 +// CHECK16-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i32* +// CHECK16-NEXT: store i32 [[TMP30]], i32* [[TMP34]], align 4 +// CHECK16-NEXT: [[TMP35:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 +// CHECK16-NEXT: [[TMP36:%.*]] = bitcast i8** [[TMP35]] to i32* +// CHECK16-NEXT: store i32 [[TMP30]], i32* [[TMP36]], align 4 +// CHECK16-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0 +// CHECK16-NEXT: store i8* null, i8** [[TMP37]], align 4 +// CHECK16-NEXT: [[TMP38:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1 +// CHECK16-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i32* +// CHECK16-NEXT: store i32 [[TMP0]], i32* [[TMP39]], align 4 +// CHECK16-NEXT: [[TMP40:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 1 +// CHECK16-NEXT: [[TMP41:%.*]] = bitcast i8** [[TMP40]] to i32* +// CHECK16-NEXT: store i32 [[TMP0]], i32* [[TMP41]], align 4 +// CHECK16-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1 // CHECK16-NEXT: store i8* null, i8** [[TMP42]], align 4 -// CHECK16-NEXT: [[TMP43:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1 -// CHECK16-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32* -// CHECK16-NEXT: store i32 [[TMP0]], i32* [[TMP44]], align 4 -// CHECK16-NEXT: [[TMP45:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 1 -// CHECK16-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32* -// CHECK16-NEXT: store i32 [[TMP0]], i32* [[TMP46]], align 4 -// CHECK16-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 1 -// CHECK16-NEXT: store i64 4, i64* [[TMP47]], align 4 -// CHECK16-NEXT: [[TMP48:%.*]] = 
getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1 -// CHECK16-NEXT: store i8* null, i8** [[TMP48]], align 4 -// CHECK16-NEXT: [[TMP49:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2 -// CHECK16-NEXT: [[TMP50:%.*]] = bitcast i8** [[TMP49]] to i32** -// CHECK16-NEXT: store i32* [[VLA]], i32** [[TMP50]], align 4 -// CHECK16-NEXT: [[TMP51:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 2 -// CHECK16-NEXT: [[TMP52:%.*]] = bitcast i8** [[TMP51]] to i32** -// CHECK16-NEXT: store i32* [[VLA]], i32** [[TMP52]], align 4 -// CHECK16-NEXT: [[TMP53:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 2 -// CHECK16-NEXT: store i64 [[TMP36]], i64* [[TMP53]], align 4 -// CHECK16-NEXT: [[TMP54:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 2 -// CHECK16-NEXT: store i8* null, i8** [[TMP54]], align 4 -// CHECK16-NEXT: [[TMP55:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 -// CHECK16-NEXT: [[TMP56:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 -// CHECK16-NEXT: [[TMP57:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 0 -// CHECK16-NEXT: [[TMP58:%.*]] = load i32, i32* [[N]], align 4 -// CHECK16-NEXT: store i32 [[TMP58]], i32* [[DOTCAPTURE_EXPR_9]], align 4 -// CHECK16-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 -// CHECK16-NEXT: [[SUB11:%.*]] = sub nsw i32 [[TMP59]], 0 -// CHECK16-NEXT: [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1 -// CHECK16-NEXT: [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1 -// CHECK16-NEXT: store i32 [[SUB13]], i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK16-NEXT: [[TMP60:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK16-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP60]], 1 -// CHECK16-NEXT: [[TMP61:%.*]] = zext i32 [[ADD14]] to i64 -// CHECK16-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP61]]) -// CHECK16-NEXT: [[TMP62:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143.region_id, i32 3, i8** [[TMP55]], i8** [[TMP56]], i64* [[TMP57]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK16-NEXT: [[TMP63:%.*]] = icmp ne i32 [[TMP62]], 0 -// CHECK16-NEXT: br i1 [[TMP63]], label [[OMP_OFFLOAD_FAILED15:%.*]], label [[OMP_OFFLOAD_CONT16:%.*]] -// CHECK16: omp_offload.failed15: -// CHECK16-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143(i32 [[TMP34]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] -// CHECK16-NEXT: br label [[OMP_OFFLOAD_CONT16]] -// CHECK16: omp_offload.cont16: -// CHECK16-NEXT: [[TMP64:%.*]] = load i32, i32* [[M]], align 4 -// CHECK16-NEXT: store i32 [[TMP64]], i32* [[DOTCAPTURE_EXPR_17]], align 4 -// CHECK16-NEXT: [[TMP65:%.*]] = load i32, i32* [[N]], align 4 -// CHECK16-NEXT: store i32 [[TMP65]], i32* [[N_CASTED18]], align 4 -// CHECK16-NEXT: [[TMP66:%.*]] = load i32, i32* [[N_CASTED18]], align 4 -// CHECK16-NEXT: [[TMP67:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_17]], align 4 -// CHECK16-NEXT: store i32 [[TMP67]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK16-NEXT: [[TMP68:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK16-NEXT: [[TMP69:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK16-NEXT: 
[[TMP70:%.*]] = sext i32 [[TMP69]] to i64 -// CHECK16-NEXT: [[TMP71:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 0 -// CHECK16-NEXT: [[TMP72:%.*]] = bitcast i8** [[TMP71]] to i32* -// CHECK16-NEXT: store i32 [[TMP66]], i32* [[TMP72]], align 4 -// CHECK16-NEXT: [[TMP73:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 0 -// CHECK16-NEXT: [[TMP74:%.*]] = bitcast i8** [[TMP73]] to i32* -// CHECK16-NEXT: store i32 [[TMP66]], i32* [[TMP74]], align 4 -// CHECK16-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 0 -// CHECK16-NEXT: store i64 4, i64* [[TMP75]], align 4 -// CHECK16-NEXT: [[TMP76:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 0 -// CHECK16-NEXT: store i8* null, i8** [[TMP76]], align 4 -// CHECK16-NEXT: [[TMP77:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 1 -// CHECK16-NEXT: [[TMP78:%.*]] = bitcast i8** [[TMP77]] to i32* -// CHECK16-NEXT: store i32 [[TMP0]], i32* [[TMP78]], align 4 -// CHECK16-NEXT: [[TMP79:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 1 -// CHECK16-NEXT: [[TMP80:%.*]] = bitcast i8** [[TMP79]] to i32* -// CHECK16-NEXT: store i32 [[TMP0]], i32* [[TMP80]], align 4 -// CHECK16-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 1 -// CHECK16-NEXT: store i64 4, i64* [[TMP81]], align 4 -// CHECK16-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 1 +// CHECK16-NEXT: [[TMP43:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2 +// CHECK16-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32** +// CHECK16-NEXT: store i32* [[VLA]], i32** [[TMP44]], align 4 +// CHECK16-NEXT: [[TMP45:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 2 +// CHECK16-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32** +// CHECK16-NEXT: store i32* [[VLA]], i32** [[TMP46]], align 4 +// CHECK16-NEXT: store i64 [[TMP32]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 2), align 4 +// CHECK16-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 2 +// CHECK16-NEXT: store i8* null, i8** [[TMP47]], align 4 +// CHECK16-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 +// CHECK16-NEXT: [[TMP49:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 +// CHECK16-NEXT: [[TMP50:%.*]] = load i32, i32* [[N]], align 4 +// CHECK16-NEXT: store i32 [[TMP50]], i32* [[DOTCAPTURE_EXPR_8]], align 4 +// CHECK16-NEXT: [[TMP51:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_8]], align 4 +// CHECK16-NEXT: [[SUB10:%.*]] = sub nsw i32 [[TMP51]], 0 +// CHECK16-NEXT: [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1 +// CHECK16-NEXT: [[SUB12:%.*]] = sub nsw i32 [[DIV11]], 1 +// CHECK16-NEXT: store i32 [[SUB12]], i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK16-NEXT: [[TMP52:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK16-NEXT: [[ADD13:%.*]] = add nsw i32 [[TMP52]], 1 +// CHECK16-NEXT: [[TMP53:%.*]] = zext i32 [[ADD13]] to i64 +// CHECK16-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP53]]) +// CHECK16-NEXT: [[TMP54:%.*]] = call i32 
@__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143.region_id, i32 3, i8** [[TMP48]], i8** [[TMP49]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK16-NEXT: [[TMP55:%.*]] = icmp ne i32 [[TMP54]], 0 +// CHECK16-NEXT: br i1 [[TMP55]], label [[OMP_OFFLOAD_FAILED14:%.*]], label [[OMP_OFFLOAD_CONT15:%.*]] +// CHECK16: omp_offload.failed14: +// CHECK16-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143(i32 [[TMP30]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] +// CHECK16-NEXT: br label [[OMP_OFFLOAD_CONT15]] +// CHECK16: omp_offload.cont15: +// CHECK16-NEXT: [[TMP56:%.*]] = load i32, i32* [[M]], align 4 +// CHECK16-NEXT: store i32 [[TMP56]], i32* [[DOTCAPTURE_EXPR_16]], align 4 +// CHECK16-NEXT: [[TMP57:%.*]] = load i32, i32* [[N]], align 4 +// CHECK16-NEXT: store i32 [[TMP57]], i32* [[N_CASTED17]], align 4 +// CHECK16-NEXT: [[TMP58:%.*]] = load i32, i32* [[N_CASTED17]], align 4 +// CHECK16-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_16]], align 4 +// CHECK16-NEXT: store i32 [[TMP59]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 +// CHECK16-NEXT: [[TMP60:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 +// CHECK16-NEXT: [[TMP61:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK16-NEXT: [[TMP62:%.*]] = sext i32 [[TMP61]] to i64 +// CHECK16-NEXT: [[TMP63:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 0 +// CHECK16-NEXT: [[TMP64:%.*]] = bitcast i8** [[TMP63]] to i32* +// CHECK16-NEXT: store i32 [[TMP58]], i32* [[TMP64]], align 4 +// CHECK16-NEXT: [[TMP65:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 0 +// CHECK16-NEXT: [[TMP66:%.*]] = bitcast i8** [[TMP65]] to i32* +// CHECK16-NEXT: store i32 [[TMP58]], i32* [[TMP66]], align 4 +// CHECK16-NEXT: [[TMP67:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 0 +// CHECK16-NEXT: store i8* null, i8** [[TMP67]], align 4 +// CHECK16-NEXT: [[TMP68:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 1 +// CHECK16-NEXT: [[TMP69:%.*]] = bitcast i8** [[TMP68]] to i32* +// CHECK16-NEXT: store i32 [[TMP0]], i32* [[TMP69]], align 4 +// CHECK16-NEXT: [[TMP70:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 1 +// CHECK16-NEXT: [[TMP71:%.*]] = bitcast i8** [[TMP70]] to i32* +// CHECK16-NEXT: store i32 [[TMP0]], i32* [[TMP71]], align 4 +// CHECK16-NEXT: [[TMP72:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 1 +// CHECK16-NEXT: store i8* null, i8** [[TMP72]], align 4 +// CHECK16-NEXT: [[TMP73:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 2 +// CHECK16-NEXT: [[TMP74:%.*]] = bitcast i8** [[TMP73]] to i32** +// CHECK16-NEXT: store i32* [[VLA]], i32** [[TMP74]], align 4 +// CHECK16-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 2 +// CHECK16-NEXT: [[TMP76:%.*]] = bitcast i8** [[TMP75]] to i32** +// CHECK16-NEXT: store i32* [[VLA]], i32** [[TMP76]], align 4 +// CHECK16-NEXT: store i64 [[TMP62]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 2), align 4 +// CHECK16-NEXT: [[TMP77:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* 
[[DOTOFFLOAD_MAPPERS20]], i32 0, i32 2 +// CHECK16-NEXT: store i8* null, i8** [[TMP77]], align 4 +// CHECK16-NEXT: [[TMP78:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 3 +// CHECK16-NEXT: [[TMP79:%.*]] = bitcast i8** [[TMP78]] to i32* +// CHECK16-NEXT: store i32 [[TMP60]], i32* [[TMP79]], align 4 +// CHECK16-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 3 +// CHECK16-NEXT: [[TMP81:%.*]] = bitcast i8** [[TMP80]] to i32* +// CHECK16-NEXT: store i32 [[TMP60]], i32* [[TMP81]], align 4 +// CHECK16-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 3 // CHECK16-NEXT: store i8* null, i8** [[TMP82]], align 4 -// CHECK16-NEXT: [[TMP83:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 2 -// CHECK16-NEXT: [[TMP84:%.*]] = bitcast i8** [[TMP83]] to i32** -// CHECK16-NEXT: store i32* [[VLA]], i32** [[TMP84]], align 4 -// CHECK16-NEXT: [[TMP85:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 2 -// CHECK16-NEXT: [[TMP86:%.*]] = bitcast i8** [[TMP85]] to i32** -// CHECK16-NEXT: store i32* [[VLA]], i32** [[TMP86]], align 4 -// CHECK16-NEXT: [[TMP87:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 2 -// CHECK16-NEXT: store i64 [[TMP70]], i64* [[TMP87]], align 4 -// CHECK16-NEXT: [[TMP88:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 2 -// CHECK16-NEXT: store i8* null, i8** [[TMP88]], align 4 -// CHECK16-NEXT: [[TMP89:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 3 -// CHECK16-NEXT: [[TMP90:%.*]] = bitcast i8** [[TMP89]] to i32* -// CHECK16-NEXT: store i32 [[TMP68]], i32* [[TMP90]], align 4 -// CHECK16-NEXT: [[TMP91:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 3 -// CHECK16-NEXT: [[TMP92:%.*]] = bitcast i8** [[TMP91]] to i32* -// CHECK16-NEXT: store i32 [[TMP68]], i32* [[TMP92]], align 4 -// CHECK16-NEXT: [[TMP93:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 3 -// CHECK16-NEXT: store i64 4, i64* [[TMP93]], align 4 -// CHECK16-NEXT: [[TMP94:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 3 -// CHECK16-NEXT: store i8* null, i8** [[TMP94]], align 4 -// CHECK16-NEXT: [[TMP95:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 0 -// CHECK16-NEXT: [[TMP96:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 0 -// CHECK16-NEXT: [[TMP97:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 0 -// CHECK16-NEXT: [[TMP98:%.*]] = load i32, i32* [[N]], align 4 -// CHECK16-NEXT: store i32 [[TMP98]], i32* [[DOTCAPTURE_EXPR_24]], align 4 -// CHECK16-NEXT: [[TMP99:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_24]], align 4 -// CHECK16-NEXT: [[SUB26:%.*]] = sub nsw i32 [[TMP99]], 0 -// CHECK16-NEXT: [[DIV27:%.*]] = sdiv i32 [[SUB26]], 1 -// CHECK16-NEXT: [[SUB28:%.*]] = sub nsw i32 [[DIV27]], 1 -// CHECK16-NEXT: store i32 [[SUB28]], i32* [[DOTCAPTURE_EXPR_25]], align 4 -// CHECK16-NEXT: [[TMP100:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_25]], align 4 -// CHECK16-NEXT: [[ADD29:%.*]] = add nsw i32 [[TMP100]], 1 -// CHECK16-NEXT: [[TMP101:%.*]] = zext i32 [[ADD29]] to i64 -// CHECK16-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* 
@[[GLOB3]], i64 -1, i64 [[TMP101]]) -// CHECK16-NEXT: [[TMP102:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147.region_id, i32 4, i8** [[TMP95]], i8** [[TMP96]], i64* [[TMP97]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.7, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK16-NEXT: [[TMP103:%.*]] = icmp ne i32 [[TMP102]], 0 -// CHECK16-NEXT: br i1 [[TMP103]], label [[OMP_OFFLOAD_FAILED30:%.*]], label [[OMP_OFFLOAD_CONT31:%.*]] -// CHECK16: omp_offload.failed30: -// CHECK16-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147(i32 [[TMP66]], i32 [[TMP0]], i32* [[VLA]], i32 [[TMP68]]) #[[ATTR3]] -// CHECK16-NEXT: br label [[OMP_OFFLOAD_CONT31]] -// CHECK16: omp_offload.cont31: -// CHECK16-NEXT: [[TMP104:%.*]] = load i32, i32* [[N]], align 4 -// CHECK16-NEXT: store i32 [[TMP104]], i32* [[N_CASTED32]], align 4 -// CHECK16-NEXT: [[TMP105:%.*]] = load i32, i32* [[N_CASTED32]], align 4 -// CHECK16-NEXT: [[TMP106:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK16-NEXT: [[TMP107:%.*]] = sext i32 [[TMP106]] to i64 -// CHECK16-NEXT: [[TMP108:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 0 -// CHECK16-NEXT: [[TMP109:%.*]] = bitcast i8** [[TMP108]] to i32* -// CHECK16-NEXT: store i32 [[TMP105]], i32* [[TMP109]], align 4 -// CHECK16-NEXT: [[TMP110:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 0 -// CHECK16-NEXT: [[TMP111:%.*]] = bitcast i8** [[TMP110]] to i32* -// CHECK16-NEXT: store i32 [[TMP105]], i32* [[TMP111]], align 4 -// CHECK16-NEXT: [[TMP112:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES36]], i32 0, i32 0 -// CHECK16-NEXT: store i64 4, i64* [[TMP112]], align 4 -// CHECK16-NEXT: [[TMP113:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS35]], i32 0, i32 0 -// CHECK16-NEXT: store i8* null, i8** [[TMP113]], align 4 -// CHECK16-NEXT: [[TMP114:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 1 -// CHECK16-NEXT: [[TMP115:%.*]] = bitcast i8** [[TMP114]] to i32* -// CHECK16-NEXT: store i32 [[TMP0]], i32* [[TMP115]], align 4 -// CHECK16-NEXT: [[TMP116:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 1 -// CHECK16-NEXT: [[TMP117:%.*]] = bitcast i8** [[TMP116]] to i32* -// CHECK16-NEXT: store i32 [[TMP0]], i32* [[TMP117]], align 4 -// CHECK16-NEXT: [[TMP118:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES36]], i32 0, i32 1 -// CHECK16-NEXT: store i64 4, i64* [[TMP118]], align 4 -// CHECK16-NEXT: [[TMP119:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS35]], i32 0, i32 1 -// CHECK16-NEXT: store i8* null, i8** [[TMP119]], align 4 -// CHECK16-NEXT: [[TMP120:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 2 -// CHECK16-NEXT: [[TMP121:%.*]] = bitcast i8** [[TMP120]] to i32** -// CHECK16-NEXT: store i32* [[VLA]], i32** [[TMP121]], align 4 -// CHECK16-NEXT: [[TMP122:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 2 -// CHECK16-NEXT: [[TMP123:%.*]] = bitcast i8** [[TMP122]] to i32** -// CHECK16-NEXT: store i32* [[VLA]], i32** [[TMP123]], align 4 -// CHECK16-NEXT: [[TMP124:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES36]], i32 0, i32 2 -// CHECK16-NEXT: store i64 [[TMP107]], i64* [[TMP124]], align 4 -// CHECK16-NEXT: 
[[TMP125:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS35]], i32 0, i32 2 -// CHECK16-NEXT: store i8* null, i8** [[TMP125]], align 4 -// CHECK16-NEXT: [[TMP126:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 0 -// CHECK16-NEXT: [[TMP127:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 0 -// CHECK16-NEXT: [[TMP128:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES36]], i32 0, i32 0 -// CHECK16-NEXT: [[TMP129:%.*]] = load i32, i32* [[N]], align 4 -// CHECK16-NEXT: store i32 [[TMP129]], i32* [[DOTCAPTURE_EXPR_38]], align 4 -// CHECK16-NEXT: [[TMP130:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_38]], align 4 -// CHECK16-NEXT: [[SUB40:%.*]] = sub nsw i32 [[TMP130]], 0 -// CHECK16-NEXT: [[DIV41:%.*]] = sdiv i32 [[SUB40]], 1 -// CHECK16-NEXT: [[SUB42:%.*]] = sub nsw i32 [[DIV41]], 1 -// CHECK16-NEXT: store i32 [[SUB42]], i32* [[DOTCAPTURE_EXPR_39]], align 4 -// CHECK16-NEXT: [[TMP131:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_39]], align 4 -// CHECK16-NEXT: [[ADD43:%.*]] = add nsw i32 [[TMP131]], 1 -// CHECK16-NEXT: [[TMP132:%.*]] = zext i32 [[ADD43]] to i64 -// CHECK16-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP132]]) -// CHECK16-NEXT: [[TMP133:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151.region_id, i32 3, i8** [[TMP126]], i8** [[TMP127]], i64* [[TMP128]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK16-NEXT: [[TMP134:%.*]] = icmp ne i32 [[TMP133]], 0 -// CHECK16-NEXT: br i1 [[TMP134]], label [[OMP_OFFLOAD_FAILED44:%.*]], label [[OMP_OFFLOAD_CONT45:%.*]] -// CHECK16: omp_offload.failed44: -// CHECK16-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151(i32 [[TMP105]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] -// CHECK16-NEXT: br label [[OMP_OFFLOAD_CONT45]] -// CHECK16: omp_offload.cont45: -// CHECK16-NEXT: [[TMP135:%.*]] = load i32, i32* [[M]], align 4 -// CHECK16-NEXT: store i32 [[TMP135]], i32* [[DOTCAPTURE_EXPR_46]], align 4 -// CHECK16-NEXT: [[TMP136:%.*]] = load i32, i32* [[N]], align 4 -// CHECK16-NEXT: store i32 [[TMP136]], i32* [[N_CASTED47]], align 4 -// CHECK16-NEXT: [[TMP137:%.*]] = load i32, i32* [[N_CASTED47]], align 4 -// CHECK16-NEXT: [[TMP138:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_46]], align 4 -// CHECK16-NEXT: store i32 [[TMP138]], i32* [[DOTCAPTURE_EXPR__CASTED48]], align 4 -// CHECK16-NEXT: [[TMP139:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED48]], align 4 -// CHECK16-NEXT: [[TMP140:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK16-NEXT: [[TMP141:%.*]] = sext i32 [[TMP140]] to i64 -// CHECK16-NEXT: [[TMP142:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS49]], i32 0, i32 0 +// CHECK16-NEXT: [[TMP83:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 0 +// CHECK16-NEXT: [[TMP84:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 0 +// CHECK16-NEXT: [[TMP85:%.*]] = load i32, i32* [[N]], align 4 +// CHECK16-NEXT: store i32 [[TMP85]], i32* [[DOTCAPTURE_EXPR_22]], align 4 +// CHECK16-NEXT: [[TMP86:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_22]], align 4 +// CHECK16-NEXT: [[SUB24:%.*]] = sub nsw i32 [[TMP86]], 0 +// CHECK16-NEXT: [[DIV25:%.*]] = sdiv i32 [[SUB24]], 1 +// CHECK16-NEXT: [[SUB26:%.*]] = sub nsw i32 
[[DIV25]], 1 +// CHECK16-NEXT: store i32 [[SUB26]], i32* [[DOTCAPTURE_EXPR_23]], align 4 +// CHECK16-NEXT: [[TMP87:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_23]], align 4 +// CHECK16-NEXT: [[ADD27:%.*]] = add nsw i32 [[TMP87]], 1 +// CHECK16-NEXT: [[TMP88:%.*]] = zext i32 [[ADD27]] to i64 +// CHECK16-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP88]]) +// CHECK16-NEXT: [[TMP89:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147.region_id, i32 4, i8** [[TMP83]], i8** [[TMP84]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK16-NEXT: [[TMP90:%.*]] = icmp ne i32 [[TMP89]], 0 +// CHECK16-NEXT: br i1 [[TMP90]], label [[OMP_OFFLOAD_FAILED28:%.*]], label [[OMP_OFFLOAD_CONT29:%.*]] +// CHECK16: omp_offload.failed28: +// CHECK16-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147(i32 [[TMP58]], i32 [[TMP0]], i32* [[VLA]], i32 [[TMP60]]) #[[ATTR3]] +// CHECK16-NEXT: br label [[OMP_OFFLOAD_CONT29]] +// CHECK16: omp_offload.cont29: +// CHECK16-NEXT: [[TMP91:%.*]] = load i32, i32* [[N]], align 4 +// CHECK16-NEXT: store i32 [[TMP91]], i32* [[N_CASTED30]], align 4 +// CHECK16-NEXT: [[TMP92:%.*]] = load i32, i32* [[N_CASTED30]], align 4 +// CHECK16-NEXT: [[TMP93:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK16-NEXT: [[TMP94:%.*]] = sext i32 [[TMP93]] to i64 +// CHECK16-NEXT: [[TMP95:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS31]], i32 0, i32 0 +// CHECK16-NEXT: [[TMP96:%.*]] = bitcast i8** [[TMP95]] to i32* +// CHECK16-NEXT: store i32 [[TMP92]], i32* [[TMP96]], align 4 +// CHECK16-NEXT: [[TMP97:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS32]], i32 0, i32 0 +// CHECK16-NEXT: [[TMP98:%.*]] = bitcast i8** [[TMP97]] to i32* +// CHECK16-NEXT: store i32 [[TMP92]], i32* [[TMP98]], align 4 +// CHECK16-NEXT: [[TMP99:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS33]], i32 0, i32 0 +// CHECK16-NEXT: store i8* null, i8** [[TMP99]], align 4 +// CHECK16-NEXT: [[TMP100:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS31]], i32 0, i32 1 +// CHECK16-NEXT: [[TMP101:%.*]] = bitcast i8** [[TMP100]] to i32* +// CHECK16-NEXT: store i32 [[TMP0]], i32* [[TMP101]], align 4 +// CHECK16-NEXT: [[TMP102:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS32]], i32 0, i32 1 +// CHECK16-NEXT: [[TMP103:%.*]] = bitcast i8** [[TMP102]] to i32* +// CHECK16-NEXT: store i32 [[TMP0]], i32* [[TMP103]], align 4 +// CHECK16-NEXT: [[TMP104:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS33]], i32 0, i32 1 +// CHECK16-NEXT: store i8* null, i8** [[TMP104]], align 4 +// CHECK16-NEXT: [[TMP105:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS31]], i32 0, i32 2 +// CHECK16-NEXT: [[TMP106:%.*]] = bitcast i8** [[TMP105]] to i32** +// CHECK16-NEXT: store i32* [[VLA]], i32** [[TMP106]], align 4 +// CHECK16-NEXT: [[TMP107:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS32]], i32 0, i32 2 +// CHECK16-NEXT: [[TMP108:%.*]] = bitcast i8** [[TMP107]] to i32** +// CHECK16-NEXT: store i32* [[VLA]], i32** [[TMP108]], align 4 +// CHECK16-NEXT: store i64 [[TMP94]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.12, i32 0, i32 2), align 4 
+// CHECK16-NEXT: [[TMP109:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS33]], i32 0, i32 2 +// CHECK16-NEXT: store i8* null, i8** [[TMP109]], align 4 +// CHECK16-NEXT: [[TMP110:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS31]], i32 0, i32 0 +// CHECK16-NEXT: [[TMP111:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS32]], i32 0, i32 0 +// CHECK16-NEXT: [[TMP112:%.*]] = load i32, i32* [[N]], align 4 +// CHECK16-NEXT: store i32 [[TMP112]], i32* [[DOTCAPTURE_EXPR_35]], align 4 +// CHECK16-NEXT: [[TMP113:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_35]], align 4 +// CHECK16-NEXT: [[SUB37:%.*]] = sub nsw i32 [[TMP113]], 0 +// CHECK16-NEXT: [[DIV38:%.*]] = sdiv i32 [[SUB37]], 1 +// CHECK16-NEXT: [[SUB39:%.*]] = sub nsw i32 [[DIV38]], 1 +// CHECK16-NEXT: store i32 [[SUB39]], i32* [[DOTCAPTURE_EXPR_36]], align 4 +// CHECK16-NEXT: [[TMP114:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_36]], align 4 +// CHECK16-NEXT: [[ADD40:%.*]] = add nsw i32 [[TMP114]], 1 +// CHECK16-NEXT: [[TMP115:%.*]] = zext i32 [[ADD40]] to i64 +// CHECK16-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP115]]) +// CHECK16-NEXT: [[TMP116:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151.region_id, i32 3, i8** [[TMP110]], i8** [[TMP111]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK16-NEXT: [[TMP117:%.*]] = icmp ne i32 [[TMP116]], 0 +// CHECK16-NEXT: br i1 [[TMP117]], label [[OMP_OFFLOAD_FAILED41:%.*]], label [[OMP_OFFLOAD_CONT42:%.*]] +// CHECK16: omp_offload.failed41: +// CHECK16-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151(i32 [[TMP92]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] +// CHECK16-NEXT: br label [[OMP_OFFLOAD_CONT42]] +// CHECK16: omp_offload.cont42: +// CHECK16-NEXT: [[TMP118:%.*]] = load i32, i32* [[M]], align 4 +// CHECK16-NEXT: store i32 [[TMP118]], i32* [[DOTCAPTURE_EXPR_43]], align 4 +// CHECK16-NEXT: [[TMP119:%.*]] = load i32, i32* [[N]], align 4 +// CHECK16-NEXT: store i32 [[TMP119]], i32* [[N_CASTED44]], align 4 +// CHECK16-NEXT: [[TMP120:%.*]] = load i32, i32* [[N_CASTED44]], align 4 +// CHECK16-NEXT: [[TMP121:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_43]], align 4 +// CHECK16-NEXT: store i32 [[TMP121]], i32* [[DOTCAPTURE_EXPR__CASTED45]], align 4 +// CHECK16-NEXT: [[TMP122:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED45]], align 4 +// CHECK16-NEXT: [[TMP123:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK16-NEXT: [[TMP124:%.*]] = sext i32 [[TMP123]] to i64 +// CHECK16-NEXT: [[TMP125:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS46]], i32 0, i32 0 +// CHECK16-NEXT: [[TMP126:%.*]] = bitcast i8** [[TMP125]] to i32* +// CHECK16-NEXT: store i32 [[TMP120]], i32* [[TMP126]], align 4 +// CHECK16-NEXT: [[TMP127:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS47]], i32 0, i32 0 +// CHECK16-NEXT: [[TMP128:%.*]] = bitcast i8** [[TMP127]] to i32* +// CHECK16-NEXT: store i32 [[TMP120]], i32* [[TMP128]], align 4 +// CHECK16-NEXT: [[TMP129:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS48]], i32 0, i32 0 +// CHECK16-NEXT: store i8* null, i8** [[TMP129]], align 4 +// CHECK16-NEXT: [[TMP130:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* 
[[DOTOFFLOAD_BASEPTRS46]], i32 0, i32 1 +// CHECK16-NEXT: [[TMP131:%.*]] = bitcast i8** [[TMP130]] to i32* +// CHECK16-NEXT: store i32 [[TMP0]], i32* [[TMP131]], align 4 +// CHECK16-NEXT: [[TMP132:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS47]], i32 0, i32 1 +// CHECK16-NEXT: [[TMP133:%.*]] = bitcast i8** [[TMP132]] to i32* +// CHECK16-NEXT: store i32 [[TMP0]], i32* [[TMP133]], align 4 +// CHECK16-NEXT: [[TMP134:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS48]], i32 0, i32 1 +// CHECK16-NEXT: store i8* null, i8** [[TMP134]], align 4 +// CHECK16-NEXT: [[TMP135:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS46]], i32 0, i32 2 +// CHECK16-NEXT: [[TMP136:%.*]] = bitcast i8** [[TMP135]] to i32** +// CHECK16-NEXT: store i32* [[VLA]], i32** [[TMP136]], align 4 +// CHECK16-NEXT: [[TMP137:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS47]], i32 0, i32 2 +// CHECK16-NEXT: [[TMP138:%.*]] = bitcast i8** [[TMP137]] to i32** +// CHECK16-NEXT: store i32* [[VLA]], i32** [[TMP138]], align 4 +// CHECK16-NEXT: store i64 [[TMP124]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.16, i32 0, i32 2), align 4 +// CHECK16-NEXT: [[TMP139:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS48]], i32 0, i32 2 +// CHECK16-NEXT: store i8* null, i8** [[TMP139]], align 4 +// CHECK16-NEXT: [[TMP140:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS46]], i32 0, i32 3 +// CHECK16-NEXT: [[TMP141:%.*]] = bitcast i8** [[TMP140]] to i32* +// CHECK16-NEXT: store i32 [[TMP122]], i32* [[TMP141]], align 4 +// CHECK16-NEXT: [[TMP142:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS47]], i32 0, i32 3 // CHECK16-NEXT: [[TMP143:%.*]] = bitcast i8** [[TMP142]] to i32* -// CHECK16-NEXT: store i32 [[TMP137]], i32* [[TMP143]], align 4 -// CHECK16-NEXT: [[TMP144:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS50]], i32 0, i32 0 -// CHECK16-NEXT: [[TMP145:%.*]] = bitcast i8** [[TMP144]] to i32* -// CHECK16-NEXT: store i32 [[TMP137]], i32* [[TMP145]], align 4 -// CHECK16-NEXT: [[TMP146:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES52]], i32 0, i32 0 -// CHECK16-NEXT: store i64 4, i64* [[TMP146]], align 4 -// CHECK16-NEXT: [[TMP147:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS51]], i32 0, i32 0 -// CHECK16-NEXT: store i8* null, i8** [[TMP147]], align 4 -// CHECK16-NEXT: [[TMP148:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS49]], i32 0, i32 1 -// CHECK16-NEXT: [[TMP149:%.*]] = bitcast i8** [[TMP148]] to i32* -// CHECK16-NEXT: store i32 [[TMP0]], i32* [[TMP149]], align 4 -// CHECK16-NEXT: [[TMP150:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS50]], i32 0, i32 1 -// CHECK16-NEXT: [[TMP151:%.*]] = bitcast i8** [[TMP150]] to i32* -// CHECK16-NEXT: store i32 [[TMP0]], i32* [[TMP151]], align 4 -// CHECK16-NEXT: [[TMP152:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES52]], i32 0, i32 1 -// CHECK16-NEXT: store i64 4, i64* [[TMP152]], align 4 -// CHECK16-NEXT: [[TMP153:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS51]], i32 0, i32 1 -// CHECK16-NEXT: store i8* null, i8** [[TMP153]], align 4 -// CHECK16-NEXT: [[TMP154:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS49]], i32 0, i32 2 -// CHECK16-NEXT: [[TMP155:%.*]] = bitcast i8** [[TMP154]] to i32** -// 
CHECK16-NEXT: store i32* [[VLA]], i32** [[TMP155]], align 4 -// CHECK16-NEXT: [[TMP156:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS50]], i32 0, i32 2 -// CHECK16-NEXT: [[TMP157:%.*]] = bitcast i8** [[TMP156]] to i32** -// CHECK16-NEXT: store i32* [[VLA]], i32** [[TMP157]], align 4 -// CHECK16-NEXT: [[TMP158:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES52]], i32 0, i32 2 -// CHECK16-NEXT: store i64 [[TMP141]], i64* [[TMP158]], align 4 -// CHECK16-NEXT: [[TMP159:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS51]], i32 0, i32 2 -// CHECK16-NEXT: store i8* null, i8** [[TMP159]], align 4 -// CHECK16-NEXT: [[TMP160:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS49]], i32 0, i32 3 -// CHECK16-NEXT: [[TMP161:%.*]] = bitcast i8** [[TMP160]] to i32* -// CHECK16-NEXT: store i32 [[TMP139]], i32* [[TMP161]], align 4 -// CHECK16-NEXT: [[TMP162:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS50]], i32 0, i32 3 -// CHECK16-NEXT: [[TMP163:%.*]] = bitcast i8** [[TMP162]] to i32* -// CHECK16-NEXT: store i32 [[TMP139]], i32* [[TMP163]], align 4 -// CHECK16-NEXT: [[TMP164:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES52]], i32 0, i32 3 -// CHECK16-NEXT: store i64 4, i64* [[TMP164]], align 4 -// CHECK16-NEXT: [[TMP165:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS51]], i32 0, i32 3 -// CHECK16-NEXT: store i8* null, i8** [[TMP165]], align 4 -// CHECK16-NEXT: [[TMP166:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS49]], i32 0, i32 0 -// CHECK16-NEXT: [[TMP167:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS50]], i32 0, i32 0 -// CHECK16-NEXT: [[TMP168:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES52]], i32 0, i32 0 -// CHECK16-NEXT: [[TMP169:%.*]] = load i32, i32* [[N]], align 4 -// CHECK16-NEXT: store i32 [[TMP169]], i32* [[DOTCAPTURE_EXPR_54]], align 4 -// CHECK16-NEXT: [[TMP170:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_54]], align 4 -// CHECK16-NEXT: [[SUB56:%.*]] = sub nsw i32 [[TMP170]], 0 -// CHECK16-NEXT: [[DIV57:%.*]] = sdiv i32 [[SUB56]], 1 -// CHECK16-NEXT: [[SUB58:%.*]] = sub nsw i32 [[DIV57]], 1 -// CHECK16-NEXT: store i32 [[SUB58]], i32* [[DOTCAPTURE_EXPR_55]], align 4 -// CHECK16-NEXT: [[TMP171:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_55]], align 4 -// CHECK16-NEXT: [[ADD59:%.*]] = add nsw i32 [[TMP171]], 1 -// CHECK16-NEXT: [[TMP172:%.*]] = zext i32 [[ADD59]] to i64 -// CHECK16-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP172]]) -// CHECK16-NEXT: [[TMP173:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155.region_id, i32 4, i8** [[TMP166]], i8** [[TMP167]], i64* [[TMP168]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK16-NEXT: [[TMP174:%.*]] = icmp ne i32 [[TMP173]], 0 -// CHECK16-NEXT: br i1 [[TMP174]], label [[OMP_OFFLOAD_FAILED60:%.*]], label [[OMP_OFFLOAD_CONT61:%.*]] -// CHECK16: omp_offload.failed60: -// CHECK16-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155(i32 [[TMP137]], i32 [[TMP0]], i32* [[VLA]], i32 [[TMP139]]) #[[ATTR3]] -// CHECK16-NEXT: br label [[OMP_OFFLOAD_CONT61]] -// CHECK16: omp_offload.cont61: -// CHECK16-NEXT: [[TMP175:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 -// CHECK16-NEXT: 
[[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP175]]) +// CHECK16-NEXT: store i32 [[TMP122]], i32* [[TMP143]], align 4 +// CHECK16-NEXT: [[TMP144:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS48]], i32 0, i32 3 +// CHECK16-NEXT: store i8* null, i8** [[TMP144]], align 4 +// CHECK16-NEXT: [[TMP145:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS46]], i32 0, i32 0 +// CHECK16-NEXT: [[TMP146:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS47]], i32 0, i32 0 +// CHECK16-NEXT: [[TMP147:%.*]] = load i32, i32* [[N]], align 4 +// CHECK16-NEXT: store i32 [[TMP147]], i32* [[DOTCAPTURE_EXPR_50]], align 4 +// CHECK16-NEXT: [[TMP148:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_50]], align 4 +// CHECK16-NEXT: [[SUB52:%.*]] = sub nsw i32 [[TMP148]], 0 +// CHECK16-NEXT: [[DIV53:%.*]] = sdiv i32 [[SUB52]], 1 +// CHECK16-NEXT: [[SUB54:%.*]] = sub nsw i32 [[DIV53]], 1 +// CHECK16-NEXT: store i32 [[SUB54]], i32* [[DOTCAPTURE_EXPR_51]], align 4 +// CHECK16-NEXT: [[TMP149:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_51]], align 4 +// CHECK16-NEXT: [[ADD55:%.*]] = add nsw i32 [[TMP149]], 1 +// CHECK16-NEXT: [[TMP150:%.*]] = zext i32 [[ADD55]] to i64 +// CHECK16-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP150]]) +// CHECK16-NEXT: [[TMP151:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155.region_id, i32 4, i8** [[TMP145]], i8** [[TMP146]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.16, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK16-NEXT: [[TMP152:%.*]] = icmp ne i32 [[TMP151]], 0 +// CHECK16-NEXT: br i1 [[TMP152]], label [[OMP_OFFLOAD_FAILED56:%.*]], label [[OMP_OFFLOAD_CONT57:%.*]] +// CHECK16: omp_offload.failed56: +// CHECK16-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155(i32 [[TMP120]], i32 [[TMP0]], i32* [[VLA]], i32 [[TMP122]]) #[[ATTR3]] +// CHECK16-NEXT: br label [[OMP_OFFLOAD_CONT57]] +// CHECK16: omp_offload.cont57: +// CHECK16-NEXT: [[TMP153:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 +// CHECK16-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP153]]) // CHECK16-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK16-NEXT: [[TMP176:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK16-NEXT: call void @llvm.stackrestore(i8* [[TMP176]]) -// CHECK16-NEXT: [[TMP177:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK16-NEXT: ret i32 [[TMP177]] +// CHECK16-NEXT: [[TMP154:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK16-NEXT: call void @llvm.stackrestore(i8* [[TMP154]]) +// CHECK16-NEXT: [[TMP155:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK16-NEXT: ret i32 [[TMP155]] // // // CHECK16-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139 @@ -17629,11 +17473,11 @@ // CHECK16-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK16-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK16-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP5]]) +// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP5]]) // CHECK16-NEXT: ret void // // -// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..5 +// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..6 // CHECK16-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK16-NEXT: entry: // CHECK16-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -17714,7 +17558,7 @@ // CHECK16-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4, !llvm.access.group !29 // CHECK16-NEXT: store i32 [[TMP20]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !29 // CHECK16-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !29 -// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*, i32)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32 [[TMP16]], i32 [[TMP17]], i32 [[TMP19]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP21]]), !llvm.access.group !29 +// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*, i32)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i32 [[TMP16]], i32 [[TMP17]], i32 [[TMP19]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP21]]), !llvm.access.group !29 // CHECK16-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK16: omp.inner.for.inc: // CHECK16-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29 @@ -17768,7 +17612,7 @@ // CHECK16-NEXT: ret void // // -// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..6 +// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..7 // CHECK16-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32 noundef [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK16-NEXT: entry: // CHECK16-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -17898,11 +17742,11 @@ // CHECK16-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4 // CHECK16-NEXT: store i32 [[TMP2]], i32* [[N_CASTED]], align 4 // CHECK16-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4 -// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*)* @.omp_outlined..8 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]]) +// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]]) // CHECK16-NEXT: ret void // // -// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..8 +// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..10 // CHECK16-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] { // CHECK16-NEXT: entry: // CHECK16-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -17975,7 +17819,7 @@ // CHECK16-NEXT: [[TMP17:%.*]] = load i32, i32* [[N_ADDR]], align 4, !llvm.access.group !35 // CHECK16-NEXT: store i32 [[TMP17]], i32* [[N_CASTED]], align 4, !llvm.access.group !35 // CHECK16-NEXT: [[TMP18:%.*]] = load i32, i32* [[N_CASTED]], align 4, !llvm.access.group !35 -// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), i32 [[TMP15]], i32 [[TMP16]], i32 [[TMP18]], i32 [[TMP0]], i32* [[TMP1]]), !llvm.access.group !35 +// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP15]], i32 [[TMP16]], i32 [[TMP18]], i32 [[TMP0]], i32* [[TMP1]]), !llvm.access.group !35 // CHECK16-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK16: omp.inner.for.inc: // CHECK16-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35 @@ -18006,7 +17850,7 @@ // CHECK16-NEXT: ret void // // -// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..9 +// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..11 // CHECK16-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32 noundef [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] { // CHECK16-NEXT: entry: // CHECK16-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -18136,11 +17980,11 @@ // CHECK16-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK16-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK16-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP5]]) +// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP5]]) // CHECK16-NEXT: ret void // // -// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..14 // CHECK16-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK16-NEXT: entry: // CHECK16-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -18219,7 +18063,7 @@ // CHECK16-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4, !llvm.access.group !41 // CHECK16-NEXT: store i32 [[TMP19]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !41 // CHECK16-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !41 -// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*, i32)* @.omp_outlined..12 to void (i32*, i32*, ...)*), i32 [[TMP15]], i32 [[TMP16]], i32 [[TMP18]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP20]]), !llvm.access.group !41 +// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*, i32)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i32 [[TMP15]], i32 [[TMP16]], i32 [[TMP18]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP20]]), !llvm.access.group !41 // CHECK16-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK16: omp.inner.for.inc: // CHECK16-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !41 @@ -18250,7 +18094,7 @@ // CHECK16-NEXT: ret void // // -// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..12 +// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..15 // CHECK16-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32 noundef [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK16-NEXT: entry: // CHECK16-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -18405,7 +18249,7 @@ // CHECK16-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK16-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK16-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK16-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l112.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK16-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* 
@.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l112.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.20, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.21, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK16-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK16-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK16: omp_offload.failed: @@ -18423,7 +18267,7 @@ // CHECK16-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0 // CHECK16-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0 // CHECK16-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK16-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.19, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.20, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK16-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.24, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.25, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK16-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 // CHECK16-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]] // CHECK16: omp_offload.failed5: @@ -18454,7 +18298,7 @@ // CHECK16-NEXT: [[TMP31:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0 // CHECK16-NEXT: [[TMP32:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0 // CHECK16-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK16-NEXT: [[TMP33:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l120.region_id, i32 2, i8** [[TMP31]], i8** [[TMP32]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.23, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.24, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK16-NEXT: [[TMP33:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l120.region_id, i32 2, i8** [[TMP31]], i8** [[TMP32]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.28, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.29, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK16-NEXT: [[TMP34:%.*]] = icmp ne i32 [[TMP33]], 0 // CHECK16-NEXT: br i1 [[TMP34]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]] // CHECK16: omp_offload.failed11: @@ -18472,7 +18316,7 @@ // CHECK16-NEXT: [[TMP40:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 0 // CHECK16-NEXT: 
[[TMP41:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 0 // CHECK16-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK16-NEXT: [[TMP42:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l124.region_id, i32 1, i8** [[TMP40]], i8** [[TMP41]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.27, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.28, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK16-NEXT: [[TMP42:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l124.region_id, i32 1, i8** [[TMP40]], i8** [[TMP41]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.32, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.33, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK16-NEXT: [[TMP43:%.*]] = icmp ne i32 [[TMP42]], 0 // CHECK16-NEXT: br i1 [[TMP43]], label [[OMP_OFFLOAD_FAILED17:%.*]], label [[OMP_OFFLOAD_CONT18:%.*]] // CHECK16: omp_offload.failed17: @@ -18503,7 +18347,7 @@ // CHECK16-NEXT: [[TMP57:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0 // CHECK16-NEXT: [[TMP58:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0 // CHECK16-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK16-NEXT: [[TMP59:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l128.region_id, i32 2, i8** [[TMP57]], i8** [[TMP58]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.31, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.32, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK16-NEXT: [[TMP59:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l128.region_id, i32 2, i8** [[TMP57]], i8** [[TMP58]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.36, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.37, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK16-NEXT: [[TMP60:%.*]] = icmp ne i32 [[TMP59]], 0 // CHECK16-NEXT: br i1 [[TMP60]], label [[OMP_OFFLOAD_FAILED25:%.*]], label [[OMP_OFFLOAD_CONT26:%.*]] // CHECK16: omp_offload.failed25: @@ -18519,11 +18363,11 @@ // CHECK16-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK16-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 // CHECK16-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 -// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK16-NEXT: ret void // // -// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..14 +// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..18 // CHECK16-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK16-NEXT: entry: // CHECK16-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -18569,7 +18413,7 @@ // CHECK16: omp.inner.for.body: // CHECK16-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !47 // CHECK16-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !47 -// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]), !llvm.access.group !47 +// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..19 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]), !llvm.access.group !47 // CHECK16-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK16: omp.inner.for.inc: // CHECK16-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47 @@ -18591,7 +18435,7 @@ // CHECK16-NEXT: ret void // // -// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..15 +// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..19 // CHECK16-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK16-NEXT: entry: // CHECK16-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -18678,11 +18522,11 @@ // CHECK16-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK16-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 // CHECK16-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 -// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..17 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..22 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK16-NEXT: ret void // // -// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..17 +// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..22 // CHECK16-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK16-NEXT: entry: // CHECK16-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -18728,7 +18572,7 @@ // CHECK16: omp.inner.for.body: // CHECK16-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !53 // CHECK16-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !53 -// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]), !llvm.access.group !53 +// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..23 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]), !llvm.access.group !53 // CHECK16-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK16: omp.inner.for.inc: // CHECK16-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !53 @@ -18750,7 +18594,7 @@ // CHECK16-NEXT: ret void // // -// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..18 +// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..23 // CHECK16-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK16-NEXT: entry: // CHECK16-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -18843,11 +18687,11 @@ // CHECK16-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK16-NEXT: store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK16-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..21 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP2]]) +// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..26 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP2]]) // CHECK16-NEXT: ret void // // -// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..21 +// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..26 // CHECK16-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK16-NEXT: entry: // CHECK16-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -18899,7 +18743,7 @@ // CHECK16-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4, !llvm.access.group !59 // CHECK16-NEXT: store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !59 // CHECK16-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !59 -// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..22 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]]), !llvm.access.group !59 +// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..27 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]]), !llvm.access.group !59 // CHECK16-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK16: omp.inner.for.inc: // CHECK16-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !59 @@ -18921,7 +18765,7 @@ // CHECK16-NEXT: ret void // // -// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..22 +// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..27 // CHECK16-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK16-NEXT: entry: // CHECK16-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -19030,11 +18874,11 @@ // CHECK16-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK16-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 // CHECK16-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 -// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..25 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..30 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK16-NEXT: ret void // // -// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..25 +// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..30 // CHECK16-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK16-NEXT: entry: // CHECK16-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -19080,7 +18924,7 @@ // CHECK16: omp.inner.for.body: // CHECK16-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !65 // CHECK16-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !65 -// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..26 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]), !llvm.access.group !65 +// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..31 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]), !llvm.access.group !65 // CHECK16-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK16: omp.inner.for.inc: // CHECK16-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !65 @@ -19102,7 +18946,7 @@ // CHECK16-NEXT: ret void // // -// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..26 +// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..31 // CHECK16-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK16-NEXT: entry: // CHECK16-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -19193,11 +19037,11 @@ // CHECK16-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK16-NEXT: store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK16-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..29 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP2]]) +// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..34 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP2]]) // CHECK16-NEXT: ret void // // -// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..29 +// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..34 // CHECK16-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK16-NEXT: entry: // CHECK16-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -19249,7 +19093,7 @@ // CHECK16-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4, !llvm.access.group !71 // CHECK16-NEXT: store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !71 // CHECK16-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !71 -// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..30 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]]), !llvm.access.group !71 +// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..35 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]]), !llvm.access.group !71 // CHECK16-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK16: omp.inner.for.inc: // CHECK16-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !71 @@ -19271,7 +19115,7 @@ // CHECK16-NEXT: ret void // // -// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..30 +// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..35 // CHECK16-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK16-NEXT: entry: // CHECK16-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -19374,7 +19218,6 @@ // CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK17-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK17-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8 -// CHECK17-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 8 // CHECK17-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK17-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK17-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -19382,38 +19225,34 @@ // CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS5:%.*]] = alloca [3 x i8*], align 8 // CHECK17-NEXT: [[DOTOFFLOAD_PTRS6:%.*]] = alloca [3 x i8*], align 8 // CHECK17-NEXT: [[DOTOFFLOAD_MAPPERS7:%.*]] = alloca [3 x i8*], align 8 -// CHECK17-NEXT: [[DOTOFFLOAD_SIZES8:%.*]] = alloca [3 x i64], align 8 -// CHECK17-NEXT: [[_TMP9:%.*]] = alloca i32, align 4 +// CHECK17-NEXT: [[_TMP8:%.*]] = alloca i32, align 4 +// CHECK17-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4 // CHECK17-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4 -// CHECK17-NEXT: [[DOTCAPTURE_EXPR_11:%.*]] = alloca i32, 
align 4 -// CHECK17-NEXT: [[DOTCAPTURE_EXPR_18:%.*]] = alloca i32, align 4 -// CHECK17-NEXT: [[N_CASTED19:%.*]] = alloca i64, align 8 +// CHECK17-NEXT: [[DOTCAPTURE_EXPR_17:%.*]] = alloca i32, align 4 +// CHECK17-NEXT: [[N_CASTED18:%.*]] = alloca i64, align 8 // CHECK17-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8 -// CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS22:%.*]] = alloca [4 x i8*], align 8 -// CHECK17-NEXT: [[DOTOFFLOAD_PTRS23:%.*]] = alloca [4 x i8*], align 8 -// CHECK17-NEXT: [[DOTOFFLOAD_MAPPERS24:%.*]] = alloca [4 x i8*], align 8 -// CHECK17-NEXT: [[DOTOFFLOAD_SIZES25:%.*]] = alloca [4 x i64], align 8 -// CHECK17-NEXT: [[_TMP26:%.*]] = alloca i32, align 4 -// CHECK17-NEXT: [[DOTCAPTURE_EXPR_27:%.*]] = alloca i32, align 4 -// CHECK17-NEXT: [[DOTCAPTURE_EXPR_28:%.*]] = alloca i32, align 4 -// CHECK17-NEXT: [[N_CASTED35:%.*]] = alloca i64, align 8 -// CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS37:%.*]] = alloca [3 x i8*], align 8 -// CHECK17-NEXT: [[DOTOFFLOAD_PTRS38:%.*]] = alloca [3 x i8*], align 8 -// CHECK17-NEXT: [[DOTOFFLOAD_MAPPERS39:%.*]] = alloca [3 x i8*], align 8 -// CHECK17-NEXT: [[DOTOFFLOAD_SIZES40:%.*]] = alloca [3 x i64], align 8 -// CHECK17-NEXT: [[_TMP41:%.*]] = alloca i32, align 4 -// CHECK17-NEXT: [[DOTCAPTURE_EXPR_42:%.*]] = alloca i32, align 4 -// CHECK17-NEXT: [[DOTCAPTURE_EXPR_43:%.*]] = alloca i32, align 4 -// CHECK17-NEXT: [[DOTCAPTURE_EXPR_50:%.*]] = alloca i32, align 4 -// CHECK17-NEXT: [[N_CASTED51:%.*]] = alloca i64, align 8 -// CHECK17-NEXT: [[DOTCAPTURE_EXPR__CASTED53:%.*]] = alloca i64, align 8 -// CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS55:%.*]] = alloca [4 x i8*], align 8 -// CHECK17-NEXT: [[DOTOFFLOAD_PTRS56:%.*]] = alloca [4 x i8*], align 8 -// CHECK17-NEXT: [[DOTOFFLOAD_MAPPERS57:%.*]] = alloca [4 x i8*], align 8 -// CHECK17-NEXT: [[DOTOFFLOAD_SIZES58:%.*]] = alloca [4 x i64], align 8 -// CHECK17-NEXT: [[_TMP59:%.*]] = alloca i32, align 4 -// CHECK17-NEXT: [[DOTCAPTURE_EXPR_60:%.*]] = alloca i32, align 4 -// CHECK17-NEXT: [[DOTCAPTURE_EXPR_61:%.*]] = alloca i32, align 4 +// CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS21:%.*]] = alloca [4 x i8*], align 8 +// CHECK17-NEXT: [[DOTOFFLOAD_PTRS22:%.*]] = alloca [4 x i8*], align 8 +// CHECK17-NEXT: [[DOTOFFLOAD_MAPPERS23:%.*]] = alloca [4 x i8*], align 8 +// CHECK17-NEXT: [[_TMP24:%.*]] = alloca i32, align 4 +// CHECK17-NEXT: [[DOTCAPTURE_EXPR_25:%.*]] = alloca i32, align 4 +// CHECK17-NEXT: [[DOTCAPTURE_EXPR_26:%.*]] = alloca i32, align 4 +// CHECK17-NEXT: [[N_CASTED33:%.*]] = alloca i64, align 8 +// CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS35:%.*]] = alloca [3 x i8*], align 8 +// CHECK17-NEXT: [[DOTOFFLOAD_PTRS36:%.*]] = alloca [3 x i8*], align 8 +// CHECK17-NEXT: [[DOTOFFLOAD_MAPPERS37:%.*]] = alloca [3 x i8*], align 8 +// CHECK17-NEXT: [[_TMP38:%.*]] = alloca i32, align 4 +// CHECK17-NEXT: [[DOTCAPTURE_EXPR_39:%.*]] = alloca i32, align 4 +// CHECK17-NEXT: [[DOTCAPTURE_EXPR_40:%.*]] = alloca i32, align 4 +// CHECK17-NEXT: [[DOTCAPTURE_EXPR_47:%.*]] = alloca i32, align 4 +// CHECK17-NEXT: [[N_CASTED48:%.*]] = alloca i64, align 8 +// CHECK17-NEXT: [[DOTCAPTURE_EXPR__CASTED50:%.*]] = alloca i64, align 8 +// CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS52:%.*]] = alloca [4 x i8*], align 8 +// CHECK17-NEXT: [[DOTOFFLOAD_PTRS53:%.*]] = alloca [4 x i8*], align 8 +// CHECK17-NEXT: [[DOTOFFLOAD_MAPPERS54:%.*]] = alloca [4 x i8*], align 8 +// CHECK17-NEXT: [[_TMP55:%.*]] = alloca i32, align 4 +// CHECK17-NEXT: [[DOTCAPTURE_EXPR_56:%.*]] = alloca i32, align 4 +// CHECK17-NEXT: [[DOTCAPTURE_EXPR_57:%.*]] = alloca i32, align 4 // 
CHECK17-NEXT: store i32 0, i32* [[RETVAL]], align 4 // CHECK17-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 // CHECK17-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 8 @@ -19436,314 +19275,280 @@ // CHECK17-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK17-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i64* // CHECK17-NEXT: store i64 [[TMP4]], i64* [[TMP9]], align 8 -// CHECK17-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK17-NEXT: store i64 4, i64* [[TMP10]], align 8 -// CHECK17-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK17-NEXT: store i8* null, i8** [[TMP11]], align 8 -// CHECK17-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK17-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64* -// CHECK17-NEXT: store i64 [[TMP1]], i64* [[TMP13]], align 8 -// CHECK17-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK17-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64* -// CHECK17-NEXT: store i64 [[TMP1]], i64* [[TMP15]], align 8 -// CHECK17-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK17-NEXT: store i64 8, i64* [[TMP16]], align 8 -// CHECK17-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK17-NEXT: store i8* null, i8** [[TMP17]], align 8 -// CHECK17-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK17-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK17-NEXT: store i8* null, i8** [[TMP10]], align 8 +// CHECK17-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK17-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i64* +// CHECK17-NEXT: store i64 [[TMP1]], i64* [[TMP12]], align 8 +// CHECK17-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK17-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i64* +// CHECK17-NEXT: store i64 [[TMP1]], i64* [[TMP14]], align 8 +// CHECK17-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK17-NEXT: store i8* null, i8** [[TMP15]], align 8 +// CHECK17-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK17-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** +// CHECK17-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 8 +// CHECK17-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK17-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** // CHECK17-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 8 -// CHECK17-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK17-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** -// CHECK17-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 8 -// CHECK17-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK17-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 8 -// CHECK17-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* 
[[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK17-NEXT: store i8* null, i8** [[TMP23]], align 8 -// CHECK17-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4 -// CHECK17-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK17-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK17-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 +// CHECK17-NEXT: store i64 [[TMP5]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 2), align 8 +// CHECK17-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK17-NEXT: store i8* null, i8** [[TMP20]], align 8 +// CHECK17-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP23:%.*]] = load i32, i32* [[N]], align 4 +// CHECK17-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK17-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK17-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK17-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK17-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK17-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK17-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1 -// CHECK17-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 -// CHECK17-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP30]]) -// CHECK17-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK17-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 -// CHECK17-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK17-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], 1 +// CHECK17-NEXT: [[TMP26:%.*]] = zext i32 [[ADD]] to i64 +// CHECK17-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP26]]) +// CHECK17-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK17-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK17-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK17: omp_offload.failed: // CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139(i64 [[TMP4]], i64 
[[TMP1]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK17: omp_offload.cont: -// CHECK17-NEXT: [[TMP33:%.*]] = load i32, i32* [[N]], align 4 +// CHECK17-NEXT: [[TMP29:%.*]] = load i32, i32* [[N]], align 4 // CHECK17-NEXT: [[CONV4:%.*]] = bitcast i64* [[N_CASTED3]] to i32* -// CHECK17-NEXT: store i32 [[TMP33]], i32* [[CONV4]], align 4 -// CHECK17-NEXT: [[TMP34:%.*]] = load i64, i64* [[N_CASTED3]], align 8 -// CHECK17-NEXT: [[TMP35:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK17-NEXT: [[TMP36:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i64* -// CHECK17-NEXT: store i64 [[TMP34]], i64* [[TMP37]], align 8 -// CHECK17-NEXT: [[TMP38:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i64* -// CHECK17-NEXT: store i64 [[TMP34]], i64* [[TMP39]], align 8 -// CHECK17-NEXT: [[TMP40:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 0 -// CHECK17-NEXT: store i64 4, i64* [[TMP40]], align 8 -// CHECK17-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 0 +// CHECK17-NEXT: store i32 [[TMP29]], i32* [[CONV4]], align 4 +// CHECK17-NEXT: [[TMP30:%.*]] = load i64, i64* [[N_CASTED3]], align 8 +// CHECK17-NEXT: [[TMP31:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK17-NEXT: [[TMP32:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i64* +// CHECK17-NEXT: store i64 [[TMP30]], i64* [[TMP33]], align 8 +// CHECK17-NEXT: [[TMP34:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i64* +// CHECK17-NEXT: store i64 [[TMP30]], i64* [[TMP35]], align 8 +// CHECK17-NEXT: [[TMP36:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 0 +// CHECK17-NEXT: store i8* null, i8** [[TMP36]], align 8 +// CHECK17-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1 +// CHECK17-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i64* +// CHECK17-NEXT: store i64 [[TMP1]], i64* [[TMP38]], align 8 +// CHECK17-NEXT: [[TMP39:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 1 +// CHECK17-NEXT: [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i64* +// CHECK17-NEXT: store i64 [[TMP1]], i64* [[TMP40]], align 8 +// CHECK17-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 1 // CHECK17-NEXT: store i8* null, i8** [[TMP41]], align 8 -// CHECK17-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1 -// CHECK17-NEXT: [[TMP43:%.*]] = bitcast i8** [[TMP42]] to i64* -// CHECK17-NEXT: store i64 [[TMP1]], i64* [[TMP43]], align 8 -// CHECK17-NEXT: [[TMP44:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 1 -// CHECK17-NEXT: [[TMP45:%.*]] = bitcast i8** [[TMP44]] to i64* -// CHECK17-NEXT: store i64 [[TMP1]], i64* [[TMP45]], align 8 -// CHECK17-NEXT: [[TMP46:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 1 -// CHECK17-NEXT: store i64 8, i64* [[TMP46]], align 8 -// CHECK17-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* 
[[DOTOFFLOAD_MAPPERS7]], i64 0, i64 1 -// CHECK17-NEXT: store i8* null, i8** [[TMP47]], align 8 -// CHECK17-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 2 -// CHECK17-NEXT: [[TMP49:%.*]] = bitcast i8** [[TMP48]] to i32** -// CHECK17-NEXT: store i32* [[VLA]], i32** [[TMP49]], align 8 -// CHECK17-NEXT: [[TMP50:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 2 -// CHECK17-NEXT: [[TMP51:%.*]] = bitcast i8** [[TMP50]] to i32** -// CHECK17-NEXT: store i32* [[VLA]], i32** [[TMP51]], align 8 -// CHECK17-NEXT: [[TMP52:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 2 -// CHECK17-NEXT: store i64 [[TMP35]], i64* [[TMP52]], align 8 -// CHECK17-NEXT: [[TMP53:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 2 -// CHECK17-NEXT: store i8* null, i8** [[TMP53]], align 8 -// CHECK17-NEXT: [[TMP54:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP55:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP56:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP57:%.*]] = load i32, i32* [[N]], align 4 -// CHECK17-NEXT: store i32 [[TMP57]], i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK17-NEXT: [[TMP58:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK17-NEXT: [[SUB12:%.*]] = sub nsw i32 [[TMP58]], 0 -// CHECK17-NEXT: [[DIV13:%.*]] = sdiv i32 [[SUB12]], 1 -// CHECK17-NEXT: [[SUB14:%.*]] = sub nsw i32 [[DIV13]], 1 -// CHECK17-NEXT: store i32 [[SUB14]], i32* [[DOTCAPTURE_EXPR_11]], align 4 -// CHECK17-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_11]], align 4 -// CHECK17-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP59]], 1 -// CHECK17-NEXT: [[TMP60:%.*]] = zext i32 [[ADD15]] to i64 -// CHECK17-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP60]]) -// CHECK17-NEXT: [[TMP61:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143.region_id, i32 3, i8** [[TMP54]], i8** [[TMP55]], i64* [[TMP56]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK17-NEXT: [[TMP62:%.*]] = icmp ne i32 [[TMP61]], 0 -// CHECK17-NEXT: br i1 [[TMP62]], label [[OMP_OFFLOAD_FAILED16:%.*]], label [[OMP_OFFLOAD_CONT17:%.*]] -// CHECK17: omp_offload.failed16: -// CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143(i64 [[TMP34]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] -// CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT17]] -// CHECK17: omp_offload.cont17: -// CHECK17-NEXT: [[TMP63:%.*]] = load i32, i32* [[M]], align 4 -// CHECK17-NEXT: store i32 [[TMP63]], i32* [[DOTCAPTURE_EXPR_18]], align 4 -// CHECK17-NEXT: [[TMP64:%.*]] = load i32, i32* [[N]], align 4 -// CHECK17-NEXT: [[CONV20:%.*]] = bitcast i64* [[N_CASTED19]] to i32* -// CHECK17-NEXT: store i32 [[TMP64]], i32* [[CONV20]], align 4 -// CHECK17-NEXT: [[TMP65:%.*]] = load i64, i64* [[N_CASTED19]], align 8 -// CHECK17-NEXT: [[TMP66:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_18]], align 4 -// CHECK17-NEXT: [[CONV21:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* -// CHECK17-NEXT: store i32 [[TMP66]], i32* [[CONV21]], align 4 -// CHECK17-NEXT: [[TMP67:%.*]] = load i64, i64* 
[[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK17-NEXT: [[TMP68:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK17-NEXT: [[TMP69:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP70:%.*]] = bitcast i8** [[TMP69]] to i64* -// CHECK17-NEXT: store i64 [[TMP65]], i64* [[TMP70]], align 8 -// CHECK17-NEXT: [[TMP71:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP72:%.*]] = bitcast i8** [[TMP71]] to i64* -// CHECK17-NEXT: store i64 [[TMP65]], i64* [[TMP72]], align 8 -// CHECK17-NEXT: [[TMP73:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 0 -// CHECK17-NEXT: store i64 4, i64* [[TMP73]], align 8 -// CHECK17-NEXT: [[TMP74:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 0 -// CHECK17-NEXT: store i8* null, i8** [[TMP74]], align 8 -// CHECK17-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 1 -// CHECK17-NEXT: [[TMP76:%.*]] = bitcast i8** [[TMP75]] to i64* -// CHECK17-NEXT: store i64 [[TMP1]], i64* [[TMP76]], align 8 -// CHECK17-NEXT: [[TMP77:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 1 -// CHECK17-NEXT: [[TMP78:%.*]] = bitcast i8** [[TMP77]] to i64* -// CHECK17-NEXT: store i64 [[TMP1]], i64* [[TMP78]], align 8 -// CHECK17-NEXT: [[TMP79:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 1 -// CHECK17-NEXT: store i64 8, i64* [[TMP79]], align 8 -// CHECK17-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 1 +// CHECK17-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 2 +// CHECK17-NEXT: [[TMP43:%.*]] = bitcast i8** [[TMP42]] to i32** +// CHECK17-NEXT: store i32* [[VLA]], i32** [[TMP43]], align 8 +// CHECK17-NEXT: [[TMP44:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 2 +// CHECK17-NEXT: [[TMP45:%.*]] = bitcast i8** [[TMP44]] to i32** +// CHECK17-NEXT: store i32* [[VLA]], i32** [[TMP45]], align 8 +// CHECK17-NEXT: store i64 [[TMP31]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 2), align 8 +// CHECK17-NEXT: [[TMP46:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 2 +// CHECK17-NEXT: store i8* null, i8** [[TMP46]], align 8 +// CHECK17-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP49:%.*]] = load i32, i32* [[N]], align 4 +// CHECK17-NEXT: store i32 [[TMP49]], i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK17-NEXT: [[TMP50:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK17-NEXT: [[SUB11:%.*]] = sub nsw i32 [[TMP50]], 0 +// CHECK17-NEXT: [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1 +// CHECK17-NEXT: [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1 +// CHECK17-NEXT: store i32 [[SUB13]], i32* [[DOTCAPTURE_EXPR_10]], align 4 +// CHECK17-NEXT: [[TMP51:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 +// CHECK17-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP51]], 1 +// CHECK17-NEXT: [[TMP52:%.*]] = zext i32 [[ADD14]] to i64 +// CHECK17-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP52]]) +// CHECK17-NEXT: 
[[TMP53:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143.region_id, i32 3, i8** [[TMP47]], i8** [[TMP48]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK17-NEXT: [[TMP54:%.*]] = icmp ne i32 [[TMP53]], 0 +// CHECK17-NEXT: br i1 [[TMP54]], label [[OMP_OFFLOAD_FAILED15:%.*]], label [[OMP_OFFLOAD_CONT16:%.*]] +// CHECK17: omp_offload.failed15: +// CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143(i64 [[TMP30]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] +// CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT16]] +// CHECK17: omp_offload.cont16: +// CHECK17-NEXT: [[TMP55:%.*]] = load i32, i32* [[M]], align 4 +// CHECK17-NEXT: store i32 [[TMP55]], i32* [[DOTCAPTURE_EXPR_17]], align 4 +// CHECK17-NEXT: [[TMP56:%.*]] = load i32, i32* [[N]], align 4 +// CHECK17-NEXT: [[CONV19:%.*]] = bitcast i64* [[N_CASTED18]] to i32* +// CHECK17-NEXT: store i32 [[TMP56]], i32* [[CONV19]], align 4 +// CHECK17-NEXT: [[TMP57:%.*]] = load i64, i64* [[N_CASTED18]], align 8 +// CHECK17-NEXT: [[TMP58:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_17]], align 4 +// CHECK17-NEXT: [[CONV20:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* +// CHECK17-NEXT: store i32 [[TMP58]], i32* [[CONV20]], align 4 +// CHECK17-NEXT: [[TMP59:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 +// CHECK17-NEXT: [[TMP60:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK17-NEXT: [[TMP61:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP62:%.*]] = bitcast i8** [[TMP61]] to i64* +// CHECK17-NEXT: store i64 [[TMP57]], i64* [[TMP62]], align 8 +// CHECK17-NEXT: [[TMP63:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP64:%.*]] = bitcast i8** [[TMP63]] to i64* +// CHECK17-NEXT: store i64 [[TMP57]], i64* [[TMP64]], align 8 +// CHECK17-NEXT: [[TMP65:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 0 +// CHECK17-NEXT: store i8* null, i8** [[TMP65]], align 8 +// CHECK17-NEXT: [[TMP66:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 1 +// CHECK17-NEXT: [[TMP67:%.*]] = bitcast i8** [[TMP66]] to i64* +// CHECK17-NEXT: store i64 [[TMP1]], i64* [[TMP67]], align 8 +// CHECK17-NEXT: [[TMP68:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 1 +// CHECK17-NEXT: [[TMP69:%.*]] = bitcast i8** [[TMP68]] to i64* +// CHECK17-NEXT: store i64 [[TMP1]], i64* [[TMP69]], align 8 +// CHECK17-NEXT: [[TMP70:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 1 +// CHECK17-NEXT: store i8* null, i8** [[TMP70]], align 8 +// CHECK17-NEXT: [[TMP71:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 2 +// CHECK17-NEXT: [[TMP72:%.*]] = bitcast i8** [[TMP71]] to i32** +// CHECK17-NEXT: store i32* [[VLA]], i32** [[TMP72]], align 8 +// CHECK17-NEXT: [[TMP73:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 2 +// CHECK17-NEXT: [[TMP74:%.*]] = bitcast i8** [[TMP73]] to i32** +// CHECK17-NEXT: store i32* [[VLA]], i32** [[TMP74]], align 8 +// CHECK17-NEXT: store i64 [[TMP60]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 2), 
align 8 +// CHECK17-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 2 +// CHECK17-NEXT: store i8* null, i8** [[TMP75]], align 8 +// CHECK17-NEXT: [[TMP76:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 3 +// CHECK17-NEXT: [[TMP77:%.*]] = bitcast i8** [[TMP76]] to i64* +// CHECK17-NEXT: store i64 [[TMP59]], i64* [[TMP77]], align 8 +// CHECK17-NEXT: [[TMP78:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 3 +// CHECK17-NEXT: [[TMP79:%.*]] = bitcast i8** [[TMP78]] to i64* +// CHECK17-NEXT: store i64 [[TMP59]], i64* [[TMP79]], align 8 +// CHECK17-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 3 // CHECK17-NEXT: store i8* null, i8** [[TMP80]], align 8 -// CHECK17-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 2 -// CHECK17-NEXT: [[TMP82:%.*]] = bitcast i8** [[TMP81]] to i32** -// CHECK17-NEXT: store i32* [[VLA]], i32** [[TMP82]], align 8 -// CHECK17-NEXT: [[TMP83:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 2 -// CHECK17-NEXT: [[TMP84:%.*]] = bitcast i8** [[TMP83]] to i32** -// CHECK17-NEXT: store i32* [[VLA]], i32** [[TMP84]], align 8 -// CHECK17-NEXT: [[TMP85:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 2 -// CHECK17-NEXT: store i64 [[TMP68]], i64* [[TMP85]], align 8 -// CHECK17-NEXT: [[TMP86:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 2 -// CHECK17-NEXT: store i8* null, i8** [[TMP86]], align 8 -// CHECK17-NEXT: [[TMP87:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 3 -// CHECK17-NEXT: [[TMP88:%.*]] = bitcast i8** [[TMP87]] to i64* -// CHECK17-NEXT: store i64 [[TMP67]], i64* [[TMP88]], align 8 -// CHECK17-NEXT: [[TMP89:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 3 -// CHECK17-NEXT: [[TMP90:%.*]] = bitcast i8** [[TMP89]] to i64* -// CHECK17-NEXT: store i64 [[TMP67]], i64* [[TMP90]], align 8 -// CHECK17-NEXT: [[TMP91:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 3 -// CHECK17-NEXT: store i64 4, i64* [[TMP91]], align 8 -// CHECK17-NEXT: [[TMP92:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 3 -// CHECK17-NEXT: store i8* null, i8** [[TMP92]], align 8 -// CHECK17-NEXT: [[TMP93:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP94:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP95:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP96:%.*]] = load i32, i32* [[N]], align 4 -// CHECK17-NEXT: store i32 [[TMP96]], i32* [[DOTCAPTURE_EXPR_27]], align 4 -// CHECK17-NEXT: [[TMP97:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_27]], align 4 -// CHECK17-NEXT: [[SUB29:%.*]] = sub nsw i32 [[TMP97]], 0 -// CHECK17-NEXT: [[DIV30:%.*]] = sdiv i32 [[SUB29]], 1 -// CHECK17-NEXT: [[SUB31:%.*]] = sub nsw i32 [[DIV30]], 1 -// CHECK17-NEXT: store i32 [[SUB31]], i32* [[DOTCAPTURE_EXPR_28]], align 4 -// CHECK17-NEXT: [[TMP98:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_28]], align 4 -// CHECK17-NEXT: [[ADD32:%.*]] = add nsw i32 [[TMP98]], 1 -// CHECK17-NEXT: [[TMP99:%.*]] = zext i32 [[ADD32]] to i64 
-// CHECK17-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP99]]) -// CHECK17-NEXT: [[TMP100:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147.region_id, i32 4, i8** [[TMP93]], i8** [[TMP94]], i64* [[TMP95]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.7, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK17-NEXT: [[TMP101:%.*]] = icmp ne i32 [[TMP100]], 0 -// CHECK17-NEXT: br i1 [[TMP101]], label [[OMP_OFFLOAD_FAILED33:%.*]], label [[OMP_OFFLOAD_CONT34:%.*]] -// CHECK17: omp_offload.failed33: -// CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147(i64 [[TMP65]], i64 [[TMP1]], i32* [[VLA]], i64 [[TMP67]]) #[[ATTR3]] -// CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT34]] -// CHECK17: omp_offload.cont34: -// CHECK17-NEXT: [[TMP102:%.*]] = load i32, i32* [[N]], align 4 -// CHECK17-NEXT: [[CONV36:%.*]] = bitcast i64* [[N_CASTED35]] to i32* -// CHECK17-NEXT: store i32 [[TMP102]], i32* [[CONV36]], align 4 -// CHECK17-NEXT: [[TMP103:%.*]] = load i64, i64* [[N_CASTED35]], align 8 -// CHECK17-NEXT: [[TMP104:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK17-NEXT: [[TMP105:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS37]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP106:%.*]] = bitcast i8** [[TMP105]] to i64* -// CHECK17-NEXT: store i64 [[TMP103]], i64* [[TMP106]], align 8 -// CHECK17-NEXT: [[TMP107:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS38]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP108:%.*]] = bitcast i8** [[TMP107]] to i64* -// CHECK17-NEXT: store i64 [[TMP103]], i64* [[TMP108]], align 8 -// CHECK17-NEXT: [[TMP109:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES40]], i32 0, i32 0 -// CHECK17-NEXT: store i64 4, i64* [[TMP109]], align 8 -// CHECK17-NEXT: [[TMP110:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS39]], i64 0, i64 0 -// CHECK17-NEXT: store i8* null, i8** [[TMP110]], align 8 -// CHECK17-NEXT: [[TMP111:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS37]], i32 0, i32 1 -// CHECK17-NEXT: [[TMP112:%.*]] = bitcast i8** [[TMP111]] to i64* -// CHECK17-NEXT: store i64 [[TMP1]], i64* [[TMP112]], align 8 -// CHECK17-NEXT: [[TMP113:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS38]], i32 0, i32 1 -// CHECK17-NEXT: [[TMP114:%.*]] = bitcast i8** [[TMP113]] to i64* -// CHECK17-NEXT: store i64 [[TMP1]], i64* [[TMP114]], align 8 -// CHECK17-NEXT: [[TMP115:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES40]], i32 0, i32 1 -// CHECK17-NEXT: store i64 8, i64* [[TMP115]], align 8 -// CHECK17-NEXT: [[TMP116:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS39]], i64 0, i64 1 -// CHECK17-NEXT: store i8* null, i8** [[TMP116]], align 8 -// CHECK17-NEXT: [[TMP117:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS37]], i32 0, i32 2 -// CHECK17-NEXT: [[TMP118:%.*]] = bitcast i8** [[TMP117]] to i32** -// CHECK17-NEXT: store i32* [[VLA]], i32** [[TMP118]], align 8 -// CHECK17-NEXT: [[TMP119:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS38]], i32 0, i32 2 -// CHECK17-NEXT: [[TMP120:%.*]] = bitcast i8** [[TMP119]] to i32** -// CHECK17-NEXT: store i32* [[VLA]], i32** [[TMP120]], align 8 -// CHECK17-NEXT: [[TMP121:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES40]], i32 0, i32 2 
-// CHECK17-NEXT: store i64 [[TMP104]], i64* [[TMP121]], align 8 -// CHECK17-NEXT: [[TMP122:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS39]], i64 0, i64 2 -// CHECK17-NEXT: store i8* null, i8** [[TMP122]], align 8 -// CHECK17-NEXT: [[TMP123:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS37]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP124:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS38]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP125:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES40]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP126:%.*]] = load i32, i32* [[N]], align 4 -// CHECK17-NEXT: store i32 [[TMP126]], i32* [[DOTCAPTURE_EXPR_42]], align 4 -// CHECK17-NEXT: [[TMP127:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_42]], align 4 -// CHECK17-NEXT: [[SUB44:%.*]] = sub nsw i32 [[TMP127]], 0 -// CHECK17-NEXT: [[DIV45:%.*]] = sdiv i32 [[SUB44]], 1 -// CHECK17-NEXT: [[SUB46:%.*]] = sub nsw i32 [[DIV45]], 1 -// CHECK17-NEXT: store i32 [[SUB46]], i32* [[DOTCAPTURE_EXPR_43]], align 4 -// CHECK17-NEXT: [[TMP128:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_43]], align 4 -// CHECK17-NEXT: [[ADD47:%.*]] = add nsw i32 [[TMP128]], 1 -// CHECK17-NEXT: [[TMP129:%.*]] = zext i32 [[ADD47]] to i64 -// CHECK17-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP129]]) -// CHECK17-NEXT: [[TMP130:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151.region_id, i32 3, i8** [[TMP123]], i8** [[TMP124]], i64* [[TMP125]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK17-NEXT: [[TMP131:%.*]] = icmp ne i32 [[TMP130]], 0 -// CHECK17-NEXT: br i1 [[TMP131]], label [[OMP_OFFLOAD_FAILED48:%.*]], label [[OMP_OFFLOAD_CONT49:%.*]] -// CHECK17: omp_offload.failed48: -// CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151(i64 [[TMP103]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] -// CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT49]] -// CHECK17: omp_offload.cont49: -// CHECK17-NEXT: [[TMP132:%.*]] = load i32, i32* [[M]], align 4 -// CHECK17-NEXT: store i32 [[TMP132]], i32* [[DOTCAPTURE_EXPR_50]], align 4 -// CHECK17-NEXT: [[TMP133:%.*]] = load i32, i32* [[N]], align 4 -// CHECK17-NEXT: [[CONV52:%.*]] = bitcast i64* [[N_CASTED51]] to i32* -// CHECK17-NEXT: store i32 [[TMP133]], i32* [[CONV52]], align 4 -// CHECK17-NEXT: [[TMP134:%.*]] = load i64, i64* [[N_CASTED51]], align 8 -// CHECK17-NEXT: [[TMP135:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_50]], align 4 -// CHECK17-NEXT: [[CONV54:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED53]] to i32* -// CHECK17-NEXT: store i32 [[TMP135]], i32* [[CONV54]], align 4 -// CHECK17-NEXT: [[TMP136:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED53]], align 8 -// CHECK17-NEXT: [[TMP137:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK17-NEXT: [[TMP138:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS55]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP83:%.*]] = load i32, i32* [[N]], align 4 +// CHECK17-NEXT: store i32 [[TMP83]], i32* [[DOTCAPTURE_EXPR_25]], align 4 +// CHECK17-NEXT: [[TMP84:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_25]], align 4 +// 
CHECK17-NEXT: [[SUB27:%.*]] = sub nsw i32 [[TMP84]], 0 +// CHECK17-NEXT: [[DIV28:%.*]] = sdiv i32 [[SUB27]], 1 +// CHECK17-NEXT: [[SUB29:%.*]] = sub nsw i32 [[DIV28]], 1 +// CHECK17-NEXT: store i32 [[SUB29]], i32* [[DOTCAPTURE_EXPR_26]], align 4 +// CHECK17-NEXT: [[TMP85:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_26]], align 4 +// CHECK17-NEXT: [[ADD30:%.*]] = add nsw i32 [[TMP85]], 1 +// CHECK17-NEXT: [[TMP86:%.*]] = zext i32 [[ADD30]] to i64 +// CHECK17-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP86]]) +// CHECK17-NEXT: [[TMP87:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147.region_id, i32 4, i8** [[TMP81]], i8** [[TMP82]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK17-NEXT: [[TMP88:%.*]] = icmp ne i32 [[TMP87]], 0 +// CHECK17-NEXT: br i1 [[TMP88]], label [[OMP_OFFLOAD_FAILED31:%.*]], label [[OMP_OFFLOAD_CONT32:%.*]] +// CHECK17: omp_offload.failed31: +// CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147(i64 [[TMP57]], i64 [[TMP1]], i32* [[VLA]], i64 [[TMP59]]) #[[ATTR3]] +// CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT32]] +// CHECK17: omp_offload.cont32: +// CHECK17-NEXT: [[TMP89:%.*]] = load i32, i32* [[N]], align 4 +// CHECK17-NEXT: [[CONV34:%.*]] = bitcast i64* [[N_CASTED33]] to i32* +// CHECK17-NEXT: store i32 [[TMP89]], i32* [[CONV34]], align 4 +// CHECK17-NEXT: [[TMP90:%.*]] = load i64, i64* [[N_CASTED33]], align 8 +// CHECK17-NEXT: [[TMP91:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK17-NEXT: [[TMP92:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS35]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP93:%.*]] = bitcast i8** [[TMP92]] to i64* +// CHECK17-NEXT: store i64 [[TMP90]], i64* [[TMP93]], align 8 +// CHECK17-NEXT: [[TMP94:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS36]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP95:%.*]] = bitcast i8** [[TMP94]] to i64* +// CHECK17-NEXT: store i64 [[TMP90]], i64* [[TMP95]], align 8 +// CHECK17-NEXT: [[TMP96:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS37]], i64 0, i64 0 +// CHECK17-NEXT: store i8* null, i8** [[TMP96]], align 8 +// CHECK17-NEXT: [[TMP97:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS35]], i32 0, i32 1 +// CHECK17-NEXT: [[TMP98:%.*]] = bitcast i8** [[TMP97]] to i64* +// CHECK17-NEXT: store i64 [[TMP1]], i64* [[TMP98]], align 8 +// CHECK17-NEXT: [[TMP99:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS36]], i32 0, i32 1 +// CHECK17-NEXT: [[TMP100:%.*]] = bitcast i8** [[TMP99]] to i64* +// CHECK17-NEXT: store i64 [[TMP1]], i64* [[TMP100]], align 8 +// CHECK17-NEXT: [[TMP101:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS37]], i64 0, i64 1 +// CHECK17-NEXT: store i8* null, i8** [[TMP101]], align 8 +// CHECK17-NEXT: [[TMP102:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS35]], i32 0, i32 2 +// CHECK17-NEXT: [[TMP103:%.*]] = bitcast i8** [[TMP102]] to i32** +// CHECK17-NEXT: store i32* [[VLA]], i32** [[TMP103]], align 8 +// CHECK17-NEXT: [[TMP104:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS36]], i32 0, i32 2 +// CHECK17-NEXT: [[TMP105:%.*]] = bitcast i8** [[TMP104]] to i32** +// CHECK17-NEXT: store i32* [[VLA]], 
i32** [[TMP105]], align 8 +// CHECK17-NEXT: store i64 [[TMP91]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.12, i32 0, i32 2), align 8 +// CHECK17-NEXT: [[TMP106:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS37]], i64 0, i64 2 +// CHECK17-NEXT: store i8* null, i8** [[TMP106]], align 8 +// CHECK17-NEXT: [[TMP107:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS35]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP108:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS36]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP109:%.*]] = load i32, i32* [[N]], align 4 +// CHECK17-NEXT: store i32 [[TMP109]], i32* [[DOTCAPTURE_EXPR_39]], align 4 +// CHECK17-NEXT: [[TMP110:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_39]], align 4 +// CHECK17-NEXT: [[SUB41:%.*]] = sub nsw i32 [[TMP110]], 0 +// CHECK17-NEXT: [[DIV42:%.*]] = sdiv i32 [[SUB41]], 1 +// CHECK17-NEXT: [[SUB43:%.*]] = sub nsw i32 [[DIV42]], 1 +// CHECK17-NEXT: store i32 [[SUB43]], i32* [[DOTCAPTURE_EXPR_40]], align 4 +// CHECK17-NEXT: [[TMP111:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_40]], align 4 +// CHECK17-NEXT: [[ADD44:%.*]] = add nsw i32 [[TMP111]], 1 +// CHECK17-NEXT: [[TMP112:%.*]] = zext i32 [[ADD44]] to i64 +// CHECK17-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP112]]) +// CHECK17-NEXT: [[TMP113:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151.region_id, i32 3, i8** [[TMP107]], i8** [[TMP108]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK17-NEXT: [[TMP114:%.*]] = icmp ne i32 [[TMP113]], 0 +// CHECK17-NEXT: br i1 [[TMP114]], label [[OMP_OFFLOAD_FAILED45:%.*]], label [[OMP_OFFLOAD_CONT46:%.*]] +// CHECK17: omp_offload.failed45: +// CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151(i64 [[TMP90]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] +// CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT46]] +// CHECK17: omp_offload.cont46: +// CHECK17-NEXT: [[TMP115:%.*]] = load i32, i32* [[M]], align 4 +// CHECK17-NEXT: store i32 [[TMP115]], i32* [[DOTCAPTURE_EXPR_47]], align 4 +// CHECK17-NEXT: [[TMP116:%.*]] = load i32, i32* [[N]], align 4 +// CHECK17-NEXT: [[CONV49:%.*]] = bitcast i64* [[N_CASTED48]] to i32* +// CHECK17-NEXT: store i32 [[TMP116]], i32* [[CONV49]], align 4 +// CHECK17-NEXT: [[TMP117:%.*]] = load i64, i64* [[N_CASTED48]], align 8 +// CHECK17-NEXT: [[TMP118:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_47]], align 4 +// CHECK17-NEXT: [[CONV51:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED50]] to i32* +// CHECK17-NEXT: store i32 [[TMP118]], i32* [[CONV51]], align 4 +// CHECK17-NEXT: [[TMP119:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED50]], align 8 +// CHECK17-NEXT: [[TMP120:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK17-NEXT: [[TMP121:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS52]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP122:%.*]] = bitcast i8** [[TMP121]] to i64* +// CHECK17-NEXT: store i64 [[TMP117]], i64* [[TMP122]], align 8 +// CHECK17-NEXT: [[TMP123:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS53]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP124:%.*]] = bitcast i8** [[TMP123]] to i64* +// CHECK17-NEXT: store i64 [[TMP117]], i64* [[TMP124]], align 8 +// CHECK17-NEXT: [[TMP125:%.*]] = 
getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS54]], i64 0, i64 0 +// CHECK17-NEXT: store i8* null, i8** [[TMP125]], align 8 +// CHECK17-NEXT: [[TMP126:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS52]], i32 0, i32 1 +// CHECK17-NEXT: [[TMP127:%.*]] = bitcast i8** [[TMP126]] to i64* +// CHECK17-NEXT: store i64 [[TMP1]], i64* [[TMP127]], align 8 +// CHECK17-NEXT: [[TMP128:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS53]], i32 0, i32 1 +// CHECK17-NEXT: [[TMP129:%.*]] = bitcast i8** [[TMP128]] to i64* +// CHECK17-NEXT: store i64 [[TMP1]], i64* [[TMP129]], align 8 +// CHECK17-NEXT: [[TMP130:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS54]], i64 0, i64 1 +// CHECK17-NEXT: store i8* null, i8** [[TMP130]], align 8 +// CHECK17-NEXT: [[TMP131:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS52]], i32 0, i32 2 +// CHECK17-NEXT: [[TMP132:%.*]] = bitcast i8** [[TMP131]] to i32** +// CHECK17-NEXT: store i32* [[VLA]], i32** [[TMP132]], align 8 +// CHECK17-NEXT: [[TMP133:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS53]], i32 0, i32 2 +// CHECK17-NEXT: [[TMP134:%.*]] = bitcast i8** [[TMP133]] to i32** +// CHECK17-NEXT: store i32* [[VLA]], i32** [[TMP134]], align 8 +// CHECK17-NEXT: store i64 [[TMP120]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.16, i32 0, i32 2), align 8 +// CHECK17-NEXT: [[TMP135:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS54]], i64 0, i64 2 +// CHECK17-NEXT: store i8* null, i8** [[TMP135]], align 8 +// CHECK17-NEXT: [[TMP136:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS52]], i32 0, i32 3 +// CHECK17-NEXT: [[TMP137:%.*]] = bitcast i8** [[TMP136]] to i64* +// CHECK17-NEXT: store i64 [[TMP119]], i64* [[TMP137]], align 8 +// CHECK17-NEXT: [[TMP138:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS53]], i32 0, i32 3 // CHECK17-NEXT: [[TMP139:%.*]] = bitcast i8** [[TMP138]] to i64* -// CHECK17-NEXT: store i64 [[TMP134]], i64* [[TMP139]], align 8 -// CHECK17-NEXT: [[TMP140:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS56]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP141:%.*]] = bitcast i8** [[TMP140]] to i64* -// CHECK17-NEXT: store i64 [[TMP134]], i64* [[TMP141]], align 8 -// CHECK17-NEXT: [[TMP142:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES58]], i32 0, i32 0 -// CHECK17-NEXT: store i64 4, i64* [[TMP142]], align 8 -// CHECK17-NEXT: [[TMP143:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS57]], i64 0, i64 0 -// CHECK17-NEXT: store i8* null, i8** [[TMP143]], align 8 -// CHECK17-NEXT: [[TMP144:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS55]], i32 0, i32 1 -// CHECK17-NEXT: [[TMP145:%.*]] = bitcast i8** [[TMP144]] to i64* -// CHECK17-NEXT: store i64 [[TMP1]], i64* [[TMP145]], align 8 -// CHECK17-NEXT: [[TMP146:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS56]], i32 0, i32 1 -// CHECK17-NEXT: [[TMP147:%.*]] = bitcast i8** [[TMP146]] to i64* -// CHECK17-NEXT: store i64 [[TMP1]], i64* [[TMP147]], align 8 -// CHECK17-NEXT: [[TMP148:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES58]], i32 0, i32 1 -// CHECK17-NEXT: store i64 8, i64* [[TMP148]], align 8 -// CHECK17-NEXT: [[TMP149:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS57]], i64 0, i64 1 -// CHECK17-NEXT: store i8* null, i8** 
[[TMP149]], align 8 -// CHECK17-NEXT: [[TMP150:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS55]], i32 0, i32 2 -// CHECK17-NEXT: [[TMP151:%.*]] = bitcast i8** [[TMP150]] to i32** -// CHECK17-NEXT: store i32* [[VLA]], i32** [[TMP151]], align 8 -// CHECK17-NEXT: [[TMP152:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS56]], i32 0, i32 2 -// CHECK17-NEXT: [[TMP153:%.*]] = bitcast i8** [[TMP152]] to i32** -// CHECK17-NEXT: store i32* [[VLA]], i32** [[TMP153]], align 8 -// CHECK17-NEXT: [[TMP154:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES58]], i32 0, i32 2 -// CHECK17-NEXT: store i64 [[TMP137]], i64* [[TMP154]], align 8 -// CHECK17-NEXT: [[TMP155:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS57]], i64 0, i64 2 -// CHECK17-NEXT: store i8* null, i8** [[TMP155]], align 8 -// CHECK17-NEXT: [[TMP156:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS55]], i32 0, i32 3 -// CHECK17-NEXT: [[TMP157:%.*]] = bitcast i8** [[TMP156]] to i64* -// CHECK17-NEXT: store i64 [[TMP136]], i64* [[TMP157]], align 8 -// CHECK17-NEXT: [[TMP158:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS56]], i32 0, i32 3 -// CHECK17-NEXT: [[TMP159:%.*]] = bitcast i8** [[TMP158]] to i64* -// CHECK17-NEXT: store i64 [[TMP136]], i64* [[TMP159]], align 8 -// CHECK17-NEXT: [[TMP160:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES58]], i32 0, i32 3 -// CHECK17-NEXT: store i64 4, i64* [[TMP160]], align 8 -// CHECK17-NEXT: [[TMP161:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS57]], i64 0, i64 3 -// CHECK17-NEXT: store i8* null, i8** [[TMP161]], align 8 -// CHECK17-NEXT: [[TMP162:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS55]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP163:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS56]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP164:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES58]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP165:%.*]] = load i32, i32* [[N]], align 4 -// CHECK17-NEXT: store i32 [[TMP165]], i32* [[DOTCAPTURE_EXPR_60]], align 4 -// CHECK17-NEXT: [[TMP166:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_60]], align 4 -// CHECK17-NEXT: [[SUB62:%.*]] = sub nsw i32 [[TMP166]], 0 -// CHECK17-NEXT: [[DIV63:%.*]] = sdiv i32 [[SUB62]], 1 -// CHECK17-NEXT: [[SUB64:%.*]] = sub nsw i32 [[DIV63]], 1 -// CHECK17-NEXT: store i32 [[SUB64]], i32* [[DOTCAPTURE_EXPR_61]], align 4 -// CHECK17-NEXT: [[TMP167:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_61]], align 4 -// CHECK17-NEXT: [[ADD65:%.*]] = add nsw i32 [[TMP167]], 1 -// CHECK17-NEXT: [[TMP168:%.*]] = zext i32 [[ADD65]] to i64 -// CHECK17-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP168]]) -// CHECK17-NEXT: [[TMP169:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155.region_id, i32 4, i8** [[TMP162]], i8** [[TMP163]], i64* [[TMP164]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK17-NEXT: [[TMP170:%.*]] = icmp ne i32 [[TMP169]], 0 -// CHECK17-NEXT: br i1 [[TMP170]], label [[OMP_OFFLOAD_FAILED66:%.*]], label [[OMP_OFFLOAD_CONT67:%.*]] -// CHECK17: omp_offload.failed66: -// CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155(i64 [[TMP134]], i64 [[TMP1]], i32* 
[[VLA]], i64 [[TMP136]]) #[[ATTR3]] -// CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT67]] -// CHECK17: omp_offload.cont67: -// CHECK17-NEXT: [[TMP171:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 -// CHECK17-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP171]]) +// CHECK17-NEXT: store i64 [[TMP119]], i64* [[TMP139]], align 8 +// CHECK17-NEXT: [[TMP140:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS54]], i64 0, i64 3 +// CHECK17-NEXT: store i8* null, i8** [[TMP140]], align 8 +// CHECK17-NEXT: [[TMP141:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS52]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP142:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS53]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP143:%.*]] = load i32, i32* [[N]], align 4 +// CHECK17-NEXT: store i32 [[TMP143]], i32* [[DOTCAPTURE_EXPR_56]], align 4 +// CHECK17-NEXT: [[TMP144:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_56]], align 4 +// CHECK17-NEXT: [[SUB58:%.*]] = sub nsw i32 [[TMP144]], 0 +// CHECK17-NEXT: [[DIV59:%.*]] = sdiv i32 [[SUB58]], 1 +// CHECK17-NEXT: [[SUB60:%.*]] = sub nsw i32 [[DIV59]], 1 +// CHECK17-NEXT: store i32 [[SUB60]], i32* [[DOTCAPTURE_EXPR_57]], align 4 +// CHECK17-NEXT: [[TMP145:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_57]], align 4 +// CHECK17-NEXT: [[ADD61:%.*]] = add nsw i32 [[TMP145]], 1 +// CHECK17-NEXT: [[TMP146:%.*]] = zext i32 [[ADD61]] to i64 +// CHECK17-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP146]]) +// CHECK17-NEXT: [[TMP147:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155.region_id, i32 4, i8** [[TMP141]], i8** [[TMP142]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.16, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK17-NEXT: [[TMP148:%.*]] = icmp ne i32 [[TMP147]], 0 +// CHECK17-NEXT: br i1 [[TMP148]], label [[OMP_OFFLOAD_FAILED62:%.*]], label [[OMP_OFFLOAD_CONT63:%.*]] +// CHECK17: omp_offload.failed62: +// CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155(i64 [[TMP117]], i64 [[TMP1]], i32* [[VLA]], i64 [[TMP119]]) #[[ATTR3]] +// CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT63]] +// CHECK17: omp_offload.cont63: +// CHECK17-NEXT: [[TMP149:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 +// CHECK17-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP149]]) // CHECK17-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK17-NEXT: [[TMP172:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK17-NEXT: call void @llvm.stackrestore(i8* [[TMP172]]) -// CHECK17-NEXT: [[TMP173:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK17-NEXT: ret i32 [[TMP173]] +// CHECK17-NEXT: [[TMP150:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK17-NEXT: call void @llvm.stackrestore(i8* [[TMP150]]) +// CHECK17-NEXT: [[TMP151:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK17-NEXT: ret i32 [[TMP151]] // // // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139 @@ -20263,11 +20068,11 @@ // CHECK17-NEXT: [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK17-NEXT: store i32 [[TMP4]], i32* [[CONV3]], align 4 // CHECK17-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK17-NEXT: call void 
(%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP5]]) +// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP5]]) // CHECK17-NEXT: ret void // // -// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..5 +// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..6 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -20354,7 +20159,7 @@ // CHECK17-NEXT: [[CONV9:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK17-NEXT: store i32 [[TMP22]], i32* [[CONV9]], align 4, !llvm.access.group !28 // CHECK17-NEXT: [[TMP23:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !28 -// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*, i64)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i64 [[TMP17]], i64 [[TMP19]], i64 [[TMP21]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP23]]), !llvm.access.group !28 +// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*, i64)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i64 [[TMP17]], i64 [[TMP19]], i64 [[TMP21]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP23]]), !llvm.access.group !28 // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK17: omp.inner.for.inc: // CHECK17-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !28 @@ -20408,7 +20213,7 @@ // CHECK17-NEXT: ret void // // -// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..6 +// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..7 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -20545,11 +20350,11 @@ // CHECK17-NEXT: [[CONV1:%.*]] = bitcast i64* [[N_CASTED]] to i32* // CHECK17-NEXT: store i32 [[TMP2]], i32* [[CONV1]], align 4 // CHECK17-NEXT: [[TMP3:%.*]] = load i64, i64* [[N_CASTED]], align 8 -// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*)* @.omp_outlined..8 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]]) +// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]]) // CHECK17-NEXT: ret void // // -// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..8 +// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..10 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -20626,7 +20431,7 @@ // CHECK17-NEXT: [[CONV6:%.*]] = bitcast i64* [[N_CASTED]] to i32* // CHECK17-NEXT: store i32 [[TMP19]], i32* [[CONV6]], align 4, !llvm.access.group !34 // CHECK17-NEXT: [[TMP20:%.*]] = load i64, i64* [[N_CASTED]], align 8, !llvm.access.group !34 -// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), i64 [[TMP16]], i64 [[TMP18]], i64 [[TMP20]], i64 [[TMP0]], i32* [[TMP1]]), !llvm.access.group !34 +// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP16]], i64 [[TMP18]], i64 [[TMP20]], i64 [[TMP0]], i32* [[TMP1]]), !llvm.access.group !34 // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK17: omp.inner.for.inc: // CHECK17-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !34 @@ -20657,7 +20462,7 @@ // CHECK17-NEXT: ret void // // -// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..9 +// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..11 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -20795,11 +20600,11 @@ // CHECK17-NEXT: [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK17-NEXT: store i32 [[TMP4]], i32* [[CONV3]], align 4 // CHECK17-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP5]]) +// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP5]]) // CHECK17-NEXT: ret void // // -// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..14 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -20884,7 +20689,7 @@ // CHECK17-NEXT: [[CONV9:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK17-NEXT: store i32 [[TMP21]], i32* [[CONV9]], align 4, !llvm.access.group !40 // CHECK17-NEXT: [[TMP22:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !40 -// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*, i64)* @.omp_outlined..12 to void (i32*, i32*, ...)*), i64 [[TMP16]], i64 [[TMP18]], i64 [[TMP20]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP22]]), !llvm.access.group !40 +// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*, i64)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i64 [[TMP16]], i64 [[TMP18]], i64 [[TMP20]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP22]]), !llvm.access.group !40 // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK17: omp.inner.for.inc: // CHECK17-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !40 @@ -20915,7 +20720,7 @@ // CHECK17-NEXT: ret void // // -// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..12 +// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..15 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -21075,7 +20880,7 @@ // CHECK17-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK17-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK17-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK17-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l112.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK17-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l112.region_id, i32 1, 
i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.20, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.21, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK17-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK17-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK17: omp_offload.failed: @@ -21093,7 +20898,7 @@ // CHECK17-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0 // CHECK17-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0 // CHECK17-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK17-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.19, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.20, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK17-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.24, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.25, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK17-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 // CHECK17-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]] // CHECK17: omp_offload.failed5: @@ -21125,7 +20930,7 @@ // CHECK17-NEXT: [[TMP31:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0 // CHECK17-NEXT: [[TMP32:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0 // CHECK17-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK17-NEXT: [[TMP33:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l120.region_id, i32 2, i8** [[TMP31]], i8** [[TMP32]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.23, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.24, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK17-NEXT: [[TMP33:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l120.region_id, i32 2, i8** [[TMP31]], i8** [[TMP32]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.28, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.29, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK17-NEXT: [[TMP34:%.*]] = icmp ne i32 [[TMP33]], 0 // CHECK17-NEXT: br i1 [[TMP34]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]] // CHECK17: omp_offload.failed11: @@ -21143,7 +20948,7 @@ // CHECK17-NEXT: [[TMP40:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 0 // CHECK17-NEXT: [[TMP41:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 
0 // CHECK17-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK17-NEXT: [[TMP42:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l124.region_id, i32 1, i8** [[TMP40]], i8** [[TMP41]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.27, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.28, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK17-NEXT: [[TMP42:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l124.region_id, i32 1, i8** [[TMP40]], i8** [[TMP41]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.32, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.33, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK17-NEXT: [[TMP43:%.*]] = icmp ne i32 [[TMP42]], 0 // CHECK17-NEXT: br i1 [[TMP43]], label [[OMP_OFFLOAD_FAILED17:%.*]], label [[OMP_OFFLOAD_CONT18:%.*]] // CHECK17: omp_offload.failed17: @@ -21175,7 +20980,7 @@ // CHECK17-NEXT: [[TMP57:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 0 // CHECK17-NEXT: [[TMP58:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 0 // CHECK17-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK17-NEXT: [[TMP59:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l128.region_id, i32 2, i8** [[TMP57]], i8** [[TMP58]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.31, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.32, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK17-NEXT: [[TMP59:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l128.region_id, i32 2, i8** [[TMP57]], i8** [[TMP58]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.36, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.37, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK17-NEXT: [[TMP60:%.*]] = icmp ne i32 [[TMP59]], 0 // CHECK17-NEXT: br i1 [[TMP60]], label [[OMP_OFFLOAD_FAILED26:%.*]], label [[OMP_OFFLOAD_CONT27:%.*]] // CHECK17: omp_offload.failed26: @@ -21191,11 +20996,11 @@ // CHECK17-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK17-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 // CHECK17-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 -// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK17-NEXT: ret void // // -// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..14 +// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..18 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -21243,7 +21048,7 @@ // CHECK17-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 // CHECK17-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !46 // CHECK17-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]), !llvm.access.group !46 +// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..19 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]), !llvm.access.group !46 // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK17: omp.inner.for.inc: // CHECK17-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !46 @@ -21265,7 +21070,7 @@ // CHECK17-NEXT: ret void // // -// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..15 +// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..19 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -21355,11 +21160,11 @@ // CHECK17-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK17-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 // CHECK17-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 -// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..17 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..22 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK17-NEXT: ret void // // -// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..17 +// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..22 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -21407,7 +21212,7 @@ // CHECK17-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 // CHECK17-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !52 // CHECK17-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]), !llvm.access.group !52 +// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..23 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]), !llvm.access.group !52 // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK17: omp.inner.for.inc: // CHECK17-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !52 @@ -21429,7 +21234,7 @@ // CHECK17-NEXT: ret void // // -// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..18 +// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..23 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -21527,11 +21332,11 @@ // CHECK17-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK17-NEXT: store i32 [[TMP1]], i32* [[CONV1]], align 4 // CHECK17-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..21 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP2]]) +// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..26 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP2]]) // CHECK17-NEXT: ret void // // -// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..21 +// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..26 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -21587,7 +21392,7 @@ // CHECK17-NEXT: [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK17-NEXT: store i32 [[TMP12]], i32* [[CONV2]], align 4, !llvm.access.group !58 // CHECK17-NEXT: [[TMP13:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !58 -// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..22 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]]), !llvm.access.group !58 +// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..27 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]]), !llvm.access.group !58 // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK17: omp.inner.for.inc: // CHECK17-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !58 @@ -21609,7 +21414,7 @@ // CHECK17-NEXT: ret void // // -// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..22 +// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..27 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -21724,11 +21529,11 @@ // CHECK17-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK17-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 // CHECK17-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 -// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..25 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..30 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK17-NEXT: ret void // // -// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..25 +// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..30 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -21776,7 +21581,7 @@ // CHECK17-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 // CHECK17-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !64 // CHECK17-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..26 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]), !llvm.access.group !64 +// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..31 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]), !llvm.access.group !64 // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK17: omp.inner.for.inc: // CHECK17-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !64 @@ -21798,7 +21603,7 @@ // CHECK17-NEXT: ret void // // -// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..26 +// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..31 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -21894,11 +21699,11 @@ // CHECK17-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK17-NEXT: store i32 [[TMP1]], i32* [[CONV1]], align 4 // CHECK17-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..29 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP2]]) +// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..34 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP2]]) // CHECK17-NEXT: ret void // // -// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..29 +// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..34 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -21954,7 +21759,7 @@ // CHECK17-NEXT: [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK17-NEXT: store i32 [[TMP12]], i32* [[CONV2]], align 4, !llvm.access.group !70 // CHECK17-NEXT: [[TMP13:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !70 -// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..30 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]]), !llvm.access.group !70 +// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..35 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]]), !llvm.access.group !70 // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK17: omp.inner.for.inc: // CHECK17-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !70 @@ -21976,7 +21781,7 @@ // CHECK17-NEXT: ret void // // -// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..30 +// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..35 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -22083,7 +21888,6 @@ // CHECK18-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK18-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK18-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8 -// CHECK18-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 8 // CHECK18-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK18-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK18-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -22091,38 +21895,34 @@ // CHECK18-NEXT: [[DOTOFFLOAD_BASEPTRS5:%.*]] = alloca [3 x i8*], align 8 // CHECK18-NEXT: [[DOTOFFLOAD_PTRS6:%.*]] = alloca [3 x i8*], align 8 // CHECK18-NEXT: [[DOTOFFLOAD_MAPPERS7:%.*]] = alloca [3 x i8*], align 8 -// CHECK18-NEXT: [[DOTOFFLOAD_SIZES8:%.*]] = alloca [3 x i64], align 8 -// CHECK18-NEXT: [[_TMP9:%.*]] = alloca i32, align 4 +// CHECK18-NEXT: [[_TMP8:%.*]] = alloca i32, align 4 +// CHECK18-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4 // CHECK18-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4 -// CHECK18-NEXT: [[DOTCAPTURE_EXPR_11:%.*]] = alloca i32, align 4 -// CHECK18-NEXT: 
[[DOTCAPTURE_EXPR_18:%.*]] = alloca i32, align 4 -// CHECK18-NEXT: [[N_CASTED19:%.*]] = alloca i64, align 8 +// CHECK18-NEXT: [[DOTCAPTURE_EXPR_17:%.*]] = alloca i32, align 4 +// CHECK18-NEXT: [[N_CASTED18:%.*]] = alloca i64, align 8 // CHECK18-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8 -// CHECK18-NEXT: [[DOTOFFLOAD_BASEPTRS22:%.*]] = alloca [4 x i8*], align 8 -// CHECK18-NEXT: [[DOTOFFLOAD_PTRS23:%.*]] = alloca [4 x i8*], align 8 -// CHECK18-NEXT: [[DOTOFFLOAD_MAPPERS24:%.*]] = alloca [4 x i8*], align 8 -// CHECK18-NEXT: [[DOTOFFLOAD_SIZES25:%.*]] = alloca [4 x i64], align 8 -// CHECK18-NEXT: [[_TMP26:%.*]] = alloca i32, align 4 -// CHECK18-NEXT: [[DOTCAPTURE_EXPR_27:%.*]] = alloca i32, align 4 -// CHECK18-NEXT: [[DOTCAPTURE_EXPR_28:%.*]] = alloca i32, align 4 -// CHECK18-NEXT: [[N_CASTED35:%.*]] = alloca i64, align 8 -// CHECK18-NEXT: [[DOTOFFLOAD_BASEPTRS37:%.*]] = alloca [3 x i8*], align 8 -// CHECK18-NEXT: [[DOTOFFLOAD_PTRS38:%.*]] = alloca [3 x i8*], align 8 -// CHECK18-NEXT: [[DOTOFFLOAD_MAPPERS39:%.*]] = alloca [3 x i8*], align 8 -// CHECK18-NEXT: [[DOTOFFLOAD_SIZES40:%.*]] = alloca [3 x i64], align 8 -// CHECK18-NEXT: [[_TMP41:%.*]] = alloca i32, align 4 -// CHECK18-NEXT: [[DOTCAPTURE_EXPR_42:%.*]] = alloca i32, align 4 -// CHECK18-NEXT: [[DOTCAPTURE_EXPR_43:%.*]] = alloca i32, align 4 -// CHECK18-NEXT: [[DOTCAPTURE_EXPR_50:%.*]] = alloca i32, align 4 -// CHECK18-NEXT: [[N_CASTED51:%.*]] = alloca i64, align 8 -// CHECK18-NEXT: [[DOTCAPTURE_EXPR__CASTED53:%.*]] = alloca i64, align 8 -// CHECK18-NEXT: [[DOTOFFLOAD_BASEPTRS55:%.*]] = alloca [4 x i8*], align 8 -// CHECK18-NEXT: [[DOTOFFLOAD_PTRS56:%.*]] = alloca [4 x i8*], align 8 -// CHECK18-NEXT: [[DOTOFFLOAD_MAPPERS57:%.*]] = alloca [4 x i8*], align 8 -// CHECK18-NEXT: [[DOTOFFLOAD_SIZES58:%.*]] = alloca [4 x i64], align 8 -// CHECK18-NEXT: [[_TMP59:%.*]] = alloca i32, align 4 -// CHECK18-NEXT: [[DOTCAPTURE_EXPR_60:%.*]] = alloca i32, align 4 -// CHECK18-NEXT: [[DOTCAPTURE_EXPR_61:%.*]] = alloca i32, align 4 +// CHECK18-NEXT: [[DOTOFFLOAD_BASEPTRS21:%.*]] = alloca [4 x i8*], align 8 +// CHECK18-NEXT: [[DOTOFFLOAD_PTRS22:%.*]] = alloca [4 x i8*], align 8 +// CHECK18-NEXT: [[DOTOFFLOAD_MAPPERS23:%.*]] = alloca [4 x i8*], align 8 +// CHECK18-NEXT: [[_TMP24:%.*]] = alloca i32, align 4 +// CHECK18-NEXT: [[DOTCAPTURE_EXPR_25:%.*]] = alloca i32, align 4 +// CHECK18-NEXT: [[DOTCAPTURE_EXPR_26:%.*]] = alloca i32, align 4 +// CHECK18-NEXT: [[N_CASTED33:%.*]] = alloca i64, align 8 +// CHECK18-NEXT: [[DOTOFFLOAD_BASEPTRS35:%.*]] = alloca [3 x i8*], align 8 +// CHECK18-NEXT: [[DOTOFFLOAD_PTRS36:%.*]] = alloca [3 x i8*], align 8 +// CHECK18-NEXT: [[DOTOFFLOAD_MAPPERS37:%.*]] = alloca [3 x i8*], align 8 +// CHECK18-NEXT: [[_TMP38:%.*]] = alloca i32, align 4 +// CHECK18-NEXT: [[DOTCAPTURE_EXPR_39:%.*]] = alloca i32, align 4 +// CHECK18-NEXT: [[DOTCAPTURE_EXPR_40:%.*]] = alloca i32, align 4 +// CHECK18-NEXT: [[DOTCAPTURE_EXPR_47:%.*]] = alloca i32, align 4 +// CHECK18-NEXT: [[N_CASTED48:%.*]] = alloca i64, align 8 +// CHECK18-NEXT: [[DOTCAPTURE_EXPR__CASTED50:%.*]] = alloca i64, align 8 +// CHECK18-NEXT: [[DOTOFFLOAD_BASEPTRS52:%.*]] = alloca [4 x i8*], align 8 +// CHECK18-NEXT: [[DOTOFFLOAD_PTRS53:%.*]] = alloca [4 x i8*], align 8 +// CHECK18-NEXT: [[DOTOFFLOAD_MAPPERS54:%.*]] = alloca [4 x i8*], align 8 +// CHECK18-NEXT: [[_TMP55:%.*]] = alloca i32, align 4 +// CHECK18-NEXT: [[DOTCAPTURE_EXPR_56:%.*]] = alloca i32, align 4 +// CHECK18-NEXT: [[DOTCAPTURE_EXPR_57:%.*]] = alloca i32, align 4 // CHECK18-NEXT: store i32 0, i32* 
[[RETVAL]], align 4 // CHECK18-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 // CHECK18-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 8 @@ -22145,314 +21945,280 @@ // CHECK18-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK18-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i64* // CHECK18-NEXT: store i64 [[TMP4]], i64* [[TMP9]], align 8 -// CHECK18-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK18-NEXT: store i64 4, i64* [[TMP10]], align 8 -// CHECK18-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK18-NEXT: store i8* null, i8** [[TMP11]], align 8 -// CHECK18-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK18-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64* -// CHECK18-NEXT: store i64 [[TMP1]], i64* [[TMP13]], align 8 -// CHECK18-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK18-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64* -// CHECK18-NEXT: store i64 [[TMP1]], i64* [[TMP15]], align 8 -// CHECK18-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK18-NEXT: store i64 8, i64* [[TMP16]], align 8 -// CHECK18-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK18-NEXT: store i8* null, i8** [[TMP17]], align 8 -// CHECK18-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK18-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK18-NEXT: store i8* null, i8** [[TMP10]], align 8 +// CHECK18-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK18-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i64* +// CHECK18-NEXT: store i64 [[TMP1]], i64* [[TMP12]], align 8 +// CHECK18-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK18-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i64* +// CHECK18-NEXT: store i64 [[TMP1]], i64* [[TMP14]], align 8 +// CHECK18-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK18-NEXT: store i8* null, i8** [[TMP15]], align 8 +// CHECK18-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK18-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** +// CHECK18-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 8 +// CHECK18-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK18-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** // CHECK18-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 8 -// CHECK18-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK18-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** -// CHECK18-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 8 -// CHECK18-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK18-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 8 -// CHECK18-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 
-// CHECK18-NEXT: store i8* null, i8** [[TMP23]], align 8 -// CHECK18-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4 -// CHECK18-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK18-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK18-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 +// CHECK18-NEXT: store i64 [[TMP5]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 2), align 8 +// CHECK18-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK18-NEXT: store i8* null, i8** [[TMP20]], align 8 +// CHECK18-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP23:%.*]] = load i32, i32* [[N]], align 4 +// CHECK18-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK18-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK18-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK18-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK18-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK18-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK18-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK18-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1 -// CHECK18-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 -// CHECK18-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP30]]) -// CHECK18-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK18-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 -// CHECK18-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK18-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK18-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], 1 +// CHECK18-NEXT: [[TMP26:%.*]] = zext i32 [[ADD]] to i64 +// CHECK18-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP26]]) +// CHECK18-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK18-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK18-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK18: omp_offload.failed: // CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139(i64 [[TMP4]], i64 [[TMP1]], i32* [[VLA]]) 
#[[ATTR3:[0-9]+]] // CHECK18-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK18: omp_offload.cont: -// CHECK18-NEXT: [[TMP33:%.*]] = load i32, i32* [[N]], align 4 +// CHECK18-NEXT: [[TMP29:%.*]] = load i32, i32* [[N]], align 4 // CHECK18-NEXT: [[CONV4:%.*]] = bitcast i64* [[N_CASTED3]] to i32* -// CHECK18-NEXT: store i32 [[TMP33]], i32* [[CONV4]], align 4 -// CHECK18-NEXT: [[TMP34:%.*]] = load i64, i64* [[N_CASTED3]], align 8 -// CHECK18-NEXT: [[TMP35:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK18-NEXT: [[TMP36:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i64* -// CHECK18-NEXT: store i64 [[TMP34]], i64* [[TMP37]], align 8 -// CHECK18-NEXT: [[TMP38:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i64* -// CHECK18-NEXT: store i64 [[TMP34]], i64* [[TMP39]], align 8 -// CHECK18-NEXT: [[TMP40:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 0 -// CHECK18-NEXT: store i64 4, i64* [[TMP40]], align 8 -// CHECK18-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 0 +// CHECK18-NEXT: store i32 [[TMP29]], i32* [[CONV4]], align 4 +// CHECK18-NEXT: [[TMP30:%.*]] = load i64, i64* [[N_CASTED3]], align 8 +// CHECK18-NEXT: [[TMP31:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK18-NEXT: [[TMP32:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i64* +// CHECK18-NEXT: store i64 [[TMP30]], i64* [[TMP33]], align 8 +// CHECK18-NEXT: [[TMP34:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i64* +// CHECK18-NEXT: store i64 [[TMP30]], i64* [[TMP35]], align 8 +// CHECK18-NEXT: [[TMP36:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 0 +// CHECK18-NEXT: store i8* null, i8** [[TMP36]], align 8 +// CHECK18-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1 +// CHECK18-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i64* +// CHECK18-NEXT: store i64 [[TMP1]], i64* [[TMP38]], align 8 +// CHECK18-NEXT: [[TMP39:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 1 +// CHECK18-NEXT: [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i64* +// CHECK18-NEXT: store i64 [[TMP1]], i64* [[TMP40]], align 8 +// CHECK18-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 1 // CHECK18-NEXT: store i8* null, i8** [[TMP41]], align 8 -// CHECK18-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1 -// CHECK18-NEXT: [[TMP43:%.*]] = bitcast i8** [[TMP42]] to i64* -// CHECK18-NEXT: store i64 [[TMP1]], i64* [[TMP43]], align 8 -// CHECK18-NEXT: [[TMP44:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 1 -// CHECK18-NEXT: [[TMP45:%.*]] = bitcast i8** [[TMP44]] to i64* -// CHECK18-NEXT: store i64 [[TMP1]], i64* [[TMP45]], align 8 -// CHECK18-NEXT: [[TMP46:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 1 -// CHECK18-NEXT: store i64 8, i64* [[TMP46]], align 8 -// CHECK18-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 1 -// 
CHECK18-NEXT: store i8* null, i8** [[TMP47]], align 8 -// CHECK18-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 2 -// CHECK18-NEXT: [[TMP49:%.*]] = bitcast i8** [[TMP48]] to i32** -// CHECK18-NEXT: store i32* [[VLA]], i32** [[TMP49]], align 8 -// CHECK18-NEXT: [[TMP50:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 2 -// CHECK18-NEXT: [[TMP51:%.*]] = bitcast i8** [[TMP50]] to i32** -// CHECK18-NEXT: store i32* [[VLA]], i32** [[TMP51]], align 8 -// CHECK18-NEXT: [[TMP52:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 2 -// CHECK18-NEXT: store i64 [[TMP35]], i64* [[TMP52]], align 8 -// CHECK18-NEXT: [[TMP53:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 2 -// CHECK18-NEXT: store i8* null, i8** [[TMP53]], align 8 -// CHECK18-NEXT: [[TMP54:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP55:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP56:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP57:%.*]] = load i32, i32* [[N]], align 4 -// CHECK18-NEXT: store i32 [[TMP57]], i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK18-NEXT: [[TMP58:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK18-NEXT: [[SUB12:%.*]] = sub nsw i32 [[TMP58]], 0 -// CHECK18-NEXT: [[DIV13:%.*]] = sdiv i32 [[SUB12]], 1 -// CHECK18-NEXT: [[SUB14:%.*]] = sub nsw i32 [[DIV13]], 1 -// CHECK18-NEXT: store i32 [[SUB14]], i32* [[DOTCAPTURE_EXPR_11]], align 4 -// CHECK18-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_11]], align 4 -// CHECK18-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP59]], 1 -// CHECK18-NEXT: [[TMP60:%.*]] = zext i32 [[ADD15]] to i64 -// CHECK18-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP60]]) -// CHECK18-NEXT: [[TMP61:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143.region_id, i32 3, i8** [[TMP54]], i8** [[TMP55]], i64* [[TMP56]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK18-NEXT: [[TMP62:%.*]] = icmp ne i32 [[TMP61]], 0 -// CHECK18-NEXT: br i1 [[TMP62]], label [[OMP_OFFLOAD_FAILED16:%.*]], label [[OMP_OFFLOAD_CONT17:%.*]] -// CHECK18: omp_offload.failed16: -// CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143(i64 [[TMP34]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] -// CHECK18-NEXT: br label [[OMP_OFFLOAD_CONT17]] -// CHECK18: omp_offload.cont17: -// CHECK18-NEXT: [[TMP63:%.*]] = load i32, i32* [[M]], align 4 -// CHECK18-NEXT: store i32 [[TMP63]], i32* [[DOTCAPTURE_EXPR_18]], align 4 -// CHECK18-NEXT: [[TMP64:%.*]] = load i32, i32* [[N]], align 4 -// CHECK18-NEXT: [[CONV20:%.*]] = bitcast i64* [[N_CASTED19]] to i32* -// CHECK18-NEXT: store i32 [[TMP64]], i32* [[CONV20]], align 4 -// CHECK18-NEXT: [[TMP65:%.*]] = load i64, i64* [[N_CASTED19]], align 8 -// CHECK18-NEXT: [[TMP66:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_18]], align 4 -// CHECK18-NEXT: [[CONV21:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* -// CHECK18-NEXT: store i32 [[TMP66]], i32* [[CONV21]], align 4 -// CHECK18-NEXT: [[TMP67:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK18-NEXT: 
[[TMP68:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK18-NEXT: [[TMP69:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP70:%.*]] = bitcast i8** [[TMP69]] to i64* -// CHECK18-NEXT: store i64 [[TMP65]], i64* [[TMP70]], align 8 -// CHECK18-NEXT: [[TMP71:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP72:%.*]] = bitcast i8** [[TMP71]] to i64* -// CHECK18-NEXT: store i64 [[TMP65]], i64* [[TMP72]], align 8 -// CHECK18-NEXT: [[TMP73:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 0 -// CHECK18-NEXT: store i64 4, i64* [[TMP73]], align 8 -// CHECK18-NEXT: [[TMP74:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 0 -// CHECK18-NEXT: store i8* null, i8** [[TMP74]], align 8 -// CHECK18-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 1 -// CHECK18-NEXT: [[TMP76:%.*]] = bitcast i8** [[TMP75]] to i64* -// CHECK18-NEXT: store i64 [[TMP1]], i64* [[TMP76]], align 8 -// CHECK18-NEXT: [[TMP77:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 1 -// CHECK18-NEXT: [[TMP78:%.*]] = bitcast i8** [[TMP77]] to i64* -// CHECK18-NEXT: store i64 [[TMP1]], i64* [[TMP78]], align 8 -// CHECK18-NEXT: [[TMP79:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 1 -// CHECK18-NEXT: store i64 8, i64* [[TMP79]], align 8 -// CHECK18-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 1 +// CHECK18-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 2 +// CHECK18-NEXT: [[TMP43:%.*]] = bitcast i8** [[TMP42]] to i32** +// CHECK18-NEXT: store i32* [[VLA]], i32** [[TMP43]], align 8 +// CHECK18-NEXT: [[TMP44:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 2 +// CHECK18-NEXT: [[TMP45:%.*]] = bitcast i8** [[TMP44]] to i32** +// CHECK18-NEXT: store i32* [[VLA]], i32** [[TMP45]], align 8 +// CHECK18-NEXT: store i64 [[TMP31]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 2), align 8 +// CHECK18-NEXT: [[TMP46:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 2 +// CHECK18-NEXT: store i8* null, i8** [[TMP46]], align 8 +// CHECK18-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP49:%.*]] = load i32, i32* [[N]], align 4 +// CHECK18-NEXT: store i32 [[TMP49]], i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK18-NEXT: [[TMP50:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK18-NEXT: [[SUB11:%.*]] = sub nsw i32 [[TMP50]], 0 +// CHECK18-NEXT: [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1 +// CHECK18-NEXT: [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1 +// CHECK18-NEXT: store i32 [[SUB13]], i32* [[DOTCAPTURE_EXPR_10]], align 4 +// CHECK18-NEXT: [[TMP51:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 +// CHECK18-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP51]], 1 +// CHECK18-NEXT: [[TMP52:%.*]] = zext i32 [[ADD14]] to i64 +// CHECK18-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP52]]) +// CHECK18-NEXT: [[TMP53:%.*]] = call i32 
@__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143.region_id, i32 3, i8** [[TMP47]], i8** [[TMP48]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK18-NEXT: [[TMP54:%.*]] = icmp ne i32 [[TMP53]], 0 +// CHECK18-NEXT: br i1 [[TMP54]], label [[OMP_OFFLOAD_FAILED15:%.*]], label [[OMP_OFFLOAD_CONT16:%.*]] +// CHECK18: omp_offload.failed15: +// CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143(i64 [[TMP30]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] +// CHECK18-NEXT: br label [[OMP_OFFLOAD_CONT16]] +// CHECK18: omp_offload.cont16: +// CHECK18-NEXT: [[TMP55:%.*]] = load i32, i32* [[M]], align 4 +// CHECK18-NEXT: store i32 [[TMP55]], i32* [[DOTCAPTURE_EXPR_17]], align 4 +// CHECK18-NEXT: [[TMP56:%.*]] = load i32, i32* [[N]], align 4 +// CHECK18-NEXT: [[CONV19:%.*]] = bitcast i64* [[N_CASTED18]] to i32* +// CHECK18-NEXT: store i32 [[TMP56]], i32* [[CONV19]], align 4 +// CHECK18-NEXT: [[TMP57:%.*]] = load i64, i64* [[N_CASTED18]], align 8 +// CHECK18-NEXT: [[TMP58:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_17]], align 4 +// CHECK18-NEXT: [[CONV20:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* +// CHECK18-NEXT: store i32 [[TMP58]], i32* [[CONV20]], align 4 +// CHECK18-NEXT: [[TMP59:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 +// CHECK18-NEXT: [[TMP60:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK18-NEXT: [[TMP61:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP62:%.*]] = bitcast i8** [[TMP61]] to i64* +// CHECK18-NEXT: store i64 [[TMP57]], i64* [[TMP62]], align 8 +// CHECK18-NEXT: [[TMP63:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP64:%.*]] = bitcast i8** [[TMP63]] to i64* +// CHECK18-NEXT: store i64 [[TMP57]], i64* [[TMP64]], align 8 +// CHECK18-NEXT: [[TMP65:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 0 +// CHECK18-NEXT: store i8* null, i8** [[TMP65]], align 8 +// CHECK18-NEXT: [[TMP66:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 1 +// CHECK18-NEXT: [[TMP67:%.*]] = bitcast i8** [[TMP66]] to i64* +// CHECK18-NEXT: store i64 [[TMP1]], i64* [[TMP67]], align 8 +// CHECK18-NEXT: [[TMP68:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 1 +// CHECK18-NEXT: [[TMP69:%.*]] = bitcast i8** [[TMP68]] to i64* +// CHECK18-NEXT: store i64 [[TMP1]], i64* [[TMP69]], align 8 +// CHECK18-NEXT: [[TMP70:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 1 +// CHECK18-NEXT: store i8* null, i8** [[TMP70]], align 8 +// CHECK18-NEXT: [[TMP71:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 2 +// CHECK18-NEXT: [[TMP72:%.*]] = bitcast i8** [[TMP71]] to i32** +// CHECK18-NEXT: store i32* [[VLA]], i32** [[TMP72]], align 8 +// CHECK18-NEXT: [[TMP73:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 2 +// CHECK18-NEXT: [[TMP74:%.*]] = bitcast i8** [[TMP73]] to i32** +// CHECK18-NEXT: store i32* [[VLA]], i32** [[TMP74]], align 8 +// CHECK18-NEXT: store i64 [[TMP60]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 2), align 8 +// CHECK18-NEXT: 
[[TMP75:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 2 +// CHECK18-NEXT: store i8* null, i8** [[TMP75]], align 8 +// CHECK18-NEXT: [[TMP76:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 3 +// CHECK18-NEXT: [[TMP77:%.*]] = bitcast i8** [[TMP76]] to i64* +// CHECK18-NEXT: store i64 [[TMP59]], i64* [[TMP77]], align 8 +// CHECK18-NEXT: [[TMP78:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 3 +// CHECK18-NEXT: [[TMP79:%.*]] = bitcast i8** [[TMP78]] to i64* +// CHECK18-NEXT: store i64 [[TMP59]], i64* [[TMP79]], align 8 +// CHECK18-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 3 // CHECK18-NEXT: store i8* null, i8** [[TMP80]], align 8 -// CHECK18-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 2 -// CHECK18-NEXT: [[TMP82:%.*]] = bitcast i8** [[TMP81]] to i32** -// CHECK18-NEXT: store i32* [[VLA]], i32** [[TMP82]], align 8 -// CHECK18-NEXT: [[TMP83:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 2 -// CHECK18-NEXT: [[TMP84:%.*]] = bitcast i8** [[TMP83]] to i32** -// CHECK18-NEXT: store i32* [[VLA]], i32** [[TMP84]], align 8 -// CHECK18-NEXT: [[TMP85:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 2 -// CHECK18-NEXT: store i64 [[TMP68]], i64* [[TMP85]], align 8 -// CHECK18-NEXT: [[TMP86:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 2 -// CHECK18-NEXT: store i8* null, i8** [[TMP86]], align 8 -// CHECK18-NEXT: [[TMP87:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 3 -// CHECK18-NEXT: [[TMP88:%.*]] = bitcast i8** [[TMP87]] to i64* -// CHECK18-NEXT: store i64 [[TMP67]], i64* [[TMP88]], align 8 -// CHECK18-NEXT: [[TMP89:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 3 -// CHECK18-NEXT: [[TMP90:%.*]] = bitcast i8** [[TMP89]] to i64* -// CHECK18-NEXT: store i64 [[TMP67]], i64* [[TMP90]], align 8 -// CHECK18-NEXT: [[TMP91:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 3 -// CHECK18-NEXT: store i64 4, i64* [[TMP91]], align 8 -// CHECK18-NEXT: [[TMP92:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 3 -// CHECK18-NEXT: store i8* null, i8** [[TMP92]], align 8 -// CHECK18-NEXT: [[TMP93:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP94:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP95:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP96:%.*]] = load i32, i32* [[N]], align 4 -// CHECK18-NEXT: store i32 [[TMP96]], i32* [[DOTCAPTURE_EXPR_27]], align 4 -// CHECK18-NEXT: [[TMP97:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_27]], align 4 -// CHECK18-NEXT: [[SUB29:%.*]] = sub nsw i32 [[TMP97]], 0 -// CHECK18-NEXT: [[DIV30:%.*]] = sdiv i32 [[SUB29]], 1 -// CHECK18-NEXT: [[SUB31:%.*]] = sub nsw i32 [[DIV30]], 1 -// CHECK18-NEXT: store i32 [[SUB31]], i32* [[DOTCAPTURE_EXPR_28]], align 4 -// CHECK18-NEXT: [[TMP98:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_28]], align 4 -// CHECK18-NEXT: [[ADD32:%.*]] = add nsw i32 [[TMP98]], 1 -// CHECK18-NEXT: [[TMP99:%.*]] = zext i32 [[ADD32]] to i64 -// CHECK18-NEXT: call 
void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP99]]) -// CHECK18-NEXT: [[TMP100:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147.region_id, i32 4, i8** [[TMP93]], i8** [[TMP94]], i64* [[TMP95]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.7, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK18-NEXT: [[TMP101:%.*]] = icmp ne i32 [[TMP100]], 0 -// CHECK18-NEXT: br i1 [[TMP101]], label [[OMP_OFFLOAD_FAILED33:%.*]], label [[OMP_OFFLOAD_CONT34:%.*]] -// CHECK18: omp_offload.failed33: -// CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147(i64 [[TMP65]], i64 [[TMP1]], i32* [[VLA]], i64 [[TMP67]]) #[[ATTR3]] -// CHECK18-NEXT: br label [[OMP_OFFLOAD_CONT34]] -// CHECK18: omp_offload.cont34: -// CHECK18-NEXT: [[TMP102:%.*]] = load i32, i32* [[N]], align 4 -// CHECK18-NEXT: [[CONV36:%.*]] = bitcast i64* [[N_CASTED35]] to i32* -// CHECK18-NEXT: store i32 [[TMP102]], i32* [[CONV36]], align 4 -// CHECK18-NEXT: [[TMP103:%.*]] = load i64, i64* [[N_CASTED35]], align 8 -// CHECK18-NEXT: [[TMP104:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK18-NEXT: [[TMP105:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS37]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP106:%.*]] = bitcast i8** [[TMP105]] to i64* -// CHECK18-NEXT: store i64 [[TMP103]], i64* [[TMP106]], align 8 -// CHECK18-NEXT: [[TMP107:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS38]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP108:%.*]] = bitcast i8** [[TMP107]] to i64* -// CHECK18-NEXT: store i64 [[TMP103]], i64* [[TMP108]], align 8 -// CHECK18-NEXT: [[TMP109:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES40]], i32 0, i32 0 -// CHECK18-NEXT: store i64 4, i64* [[TMP109]], align 8 -// CHECK18-NEXT: [[TMP110:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS39]], i64 0, i64 0 -// CHECK18-NEXT: store i8* null, i8** [[TMP110]], align 8 -// CHECK18-NEXT: [[TMP111:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS37]], i32 0, i32 1 -// CHECK18-NEXT: [[TMP112:%.*]] = bitcast i8** [[TMP111]] to i64* -// CHECK18-NEXT: store i64 [[TMP1]], i64* [[TMP112]], align 8 -// CHECK18-NEXT: [[TMP113:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS38]], i32 0, i32 1 -// CHECK18-NEXT: [[TMP114:%.*]] = bitcast i8** [[TMP113]] to i64* -// CHECK18-NEXT: store i64 [[TMP1]], i64* [[TMP114]], align 8 -// CHECK18-NEXT: [[TMP115:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES40]], i32 0, i32 1 -// CHECK18-NEXT: store i64 8, i64* [[TMP115]], align 8 -// CHECK18-NEXT: [[TMP116:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS39]], i64 0, i64 1 -// CHECK18-NEXT: store i8* null, i8** [[TMP116]], align 8 -// CHECK18-NEXT: [[TMP117:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS37]], i32 0, i32 2 -// CHECK18-NEXT: [[TMP118:%.*]] = bitcast i8** [[TMP117]] to i32** -// CHECK18-NEXT: store i32* [[VLA]], i32** [[TMP118]], align 8 -// CHECK18-NEXT: [[TMP119:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS38]], i32 0, i32 2 -// CHECK18-NEXT: [[TMP120:%.*]] = bitcast i8** [[TMP119]] to i32** -// CHECK18-NEXT: store i32* [[VLA]], i32** [[TMP120]], align 8 -// CHECK18-NEXT: [[TMP121:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES40]], i32 0, i32 2 -// CHECK18-NEXT: store 
i64 [[TMP104]], i64* [[TMP121]], align 8 -// CHECK18-NEXT: [[TMP122:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS39]], i64 0, i64 2 -// CHECK18-NEXT: store i8* null, i8** [[TMP122]], align 8 -// CHECK18-NEXT: [[TMP123:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS37]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP124:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS38]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP125:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES40]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP126:%.*]] = load i32, i32* [[N]], align 4 -// CHECK18-NEXT: store i32 [[TMP126]], i32* [[DOTCAPTURE_EXPR_42]], align 4 -// CHECK18-NEXT: [[TMP127:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_42]], align 4 -// CHECK18-NEXT: [[SUB44:%.*]] = sub nsw i32 [[TMP127]], 0 -// CHECK18-NEXT: [[DIV45:%.*]] = sdiv i32 [[SUB44]], 1 -// CHECK18-NEXT: [[SUB46:%.*]] = sub nsw i32 [[DIV45]], 1 -// CHECK18-NEXT: store i32 [[SUB46]], i32* [[DOTCAPTURE_EXPR_43]], align 4 -// CHECK18-NEXT: [[TMP128:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_43]], align 4 -// CHECK18-NEXT: [[ADD47:%.*]] = add nsw i32 [[TMP128]], 1 -// CHECK18-NEXT: [[TMP129:%.*]] = zext i32 [[ADD47]] to i64 -// CHECK18-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP129]]) -// CHECK18-NEXT: [[TMP130:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151.region_id, i32 3, i8** [[TMP123]], i8** [[TMP124]], i64* [[TMP125]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK18-NEXT: [[TMP131:%.*]] = icmp ne i32 [[TMP130]], 0 -// CHECK18-NEXT: br i1 [[TMP131]], label [[OMP_OFFLOAD_FAILED48:%.*]], label [[OMP_OFFLOAD_CONT49:%.*]] -// CHECK18: omp_offload.failed48: -// CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151(i64 [[TMP103]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] -// CHECK18-NEXT: br label [[OMP_OFFLOAD_CONT49]] -// CHECK18: omp_offload.cont49: -// CHECK18-NEXT: [[TMP132:%.*]] = load i32, i32* [[M]], align 4 -// CHECK18-NEXT: store i32 [[TMP132]], i32* [[DOTCAPTURE_EXPR_50]], align 4 -// CHECK18-NEXT: [[TMP133:%.*]] = load i32, i32* [[N]], align 4 -// CHECK18-NEXT: [[CONV52:%.*]] = bitcast i64* [[N_CASTED51]] to i32* -// CHECK18-NEXT: store i32 [[TMP133]], i32* [[CONV52]], align 4 -// CHECK18-NEXT: [[TMP134:%.*]] = load i64, i64* [[N_CASTED51]], align 8 -// CHECK18-NEXT: [[TMP135:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_50]], align 4 -// CHECK18-NEXT: [[CONV54:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED53]] to i32* -// CHECK18-NEXT: store i32 [[TMP135]], i32* [[CONV54]], align 4 -// CHECK18-NEXT: [[TMP136:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED53]], align 8 -// CHECK18-NEXT: [[TMP137:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK18-NEXT: [[TMP138:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS55]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP83:%.*]] = load i32, i32* [[N]], align 4 +// CHECK18-NEXT: store i32 [[TMP83]], i32* [[DOTCAPTURE_EXPR_25]], align 4 +// CHECK18-NEXT: [[TMP84:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_25]], align 4 +// CHECK18-NEXT: [[SUB27:%.*]] = 
sub nsw i32 [[TMP84]], 0 +// CHECK18-NEXT: [[DIV28:%.*]] = sdiv i32 [[SUB27]], 1 +// CHECK18-NEXT: [[SUB29:%.*]] = sub nsw i32 [[DIV28]], 1 +// CHECK18-NEXT: store i32 [[SUB29]], i32* [[DOTCAPTURE_EXPR_26]], align 4 +// CHECK18-NEXT: [[TMP85:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_26]], align 4 +// CHECK18-NEXT: [[ADD30:%.*]] = add nsw i32 [[TMP85]], 1 +// CHECK18-NEXT: [[TMP86:%.*]] = zext i32 [[ADD30]] to i64 +// CHECK18-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP86]]) +// CHECK18-NEXT: [[TMP87:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147.region_id, i32 4, i8** [[TMP81]], i8** [[TMP82]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK18-NEXT: [[TMP88:%.*]] = icmp ne i32 [[TMP87]], 0 +// CHECK18-NEXT: br i1 [[TMP88]], label [[OMP_OFFLOAD_FAILED31:%.*]], label [[OMP_OFFLOAD_CONT32:%.*]] +// CHECK18: omp_offload.failed31: +// CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147(i64 [[TMP57]], i64 [[TMP1]], i32* [[VLA]], i64 [[TMP59]]) #[[ATTR3]] +// CHECK18-NEXT: br label [[OMP_OFFLOAD_CONT32]] +// CHECK18: omp_offload.cont32: +// CHECK18-NEXT: [[TMP89:%.*]] = load i32, i32* [[N]], align 4 +// CHECK18-NEXT: [[CONV34:%.*]] = bitcast i64* [[N_CASTED33]] to i32* +// CHECK18-NEXT: store i32 [[TMP89]], i32* [[CONV34]], align 4 +// CHECK18-NEXT: [[TMP90:%.*]] = load i64, i64* [[N_CASTED33]], align 8 +// CHECK18-NEXT: [[TMP91:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK18-NEXT: [[TMP92:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS35]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP93:%.*]] = bitcast i8** [[TMP92]] to i64* +// CHECK18-NEXT: store i64 [[TMP90]], i64* [[TMP93]], align 8 +// CHECK18-NEXT: [[TMP94:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS36]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP95:%.*]] = bitcast i8** [[TMP94]] to i64* +// CHECK18-NEXT: store i64 [[TMP90]], i64* [[TMP95]], align 8 +// CHECK18-NEXT: [[TMP96:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS37]], i64 0, i64 0 +// CHECK18-NEXT: store i8* null, i8** [[TMP96]], align 8 +// CHECK18-NEXT: [[TMP97:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS35]], i32 0, i32 1 +// CHECK18-NEXT: [[TMP98:%.*]] = bitcast i8** [[TMP97]] to i64* +// CHECK18-NEXT: store i64 [[TMP1]], i64* [[TMP98]], align 8 +// CHECK18-NEXT: [[TMP99:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS36]], i32 0, i32 1 +// CHECK18-NEXT: [[TMP100:%.*]] = bitcast i8** [[TMP99]] to i64* +// CHECK18-NEXT: store i64 [[TMP1]], i64* [[TMP100]], align 8 +// CHECK18-NEXT: [[TMP101:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS37]], i64 0, i64 1 +// CHECK18-NEXT: store i8* null, i8** [[TMP101]], align 8 +// CHECK18-NEXT: [[TMP102:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS35]], i32 0, i32 2 +// CHECK18-NEXT: [[TMP103:%.*]] = bitcast i8** [[TMP102]] to i32** +// CHECK18-NEXT: store i32* [[VLA]], i32** [[TMP103]], align 8 +// CHECK18-NEXT: [[TMP104:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS36]], i32 0, i32 2 +// CHECK18-NEXT: [[TMP105:%.*]] = bitcast i8** [[TMP104]] to i32** +// CHECK18-NEXT: store i32* [[VLA]], i32** [[TMP105]], align 8 +// 
CHECK18-NEXT: store i64 [[TMP91]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.12, i32 0, i32 2), align 8 +// CHECK18-NEXT: [[TMP106:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS37]], i64 0, i64 2 +// CHECK18-NEXT: store i8* null, i8** [[TMP106]], align 8 +// CHECK18-NEXT: [[TMP107:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS35]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP108:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS36]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP109:%.*]] = load i32, i32* [[N]], align 4 +// CHECK18-NEXT: store i32 [[TMP109]], i32* [[DOTCAPTURE_EXPR_39]], align 4 +// CHECK18-NEXT: [[TMP110:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_39]], align 4 +// CHECK18-NEXT: [[SUB41:%.*]] = sub nsw i32 [[TMP110]], 0 +// CHECK18-NEXT: [[DIV42:%.*]] = sdiv i32 [[SUB41]], 1 +// CHECK18-NEXT: [[SUB43:%.*]] = sub nsw i32 [[DIV42]], 1 +// CHECK18-NEXT: store i32 [[SUB43]], i32* [[DOTCAPTURE_EXPR_40]], align 4 +// CHECK18-NEXT: [[TMP111:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_40]], align 4 +// CHECK18-NEXT: [[ADD44:%.*]] = add nsw i32 [[TMP111]], 1 +// CHECK18-NEXT: [[TMP112:%.*]] = zext i32 [[ADD44]] to i64 +// CHECK18-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP112]]) +// CHECK18-NEXT: [[TMP113:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151.region_id, i32 3, i8** [[TMP107]], i8** [[TMP108]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK18-NEXT: [[TMP114:%.*]] = icmp ne i32 [[TMP113]], 0 +// CHECK18-NEXT: br i1 [[TMP114]], label [[OMP_OFFLOAD_FAILED45:%.*]], label [[OMP_OFFLOAD_CONT46:%.*]] +// CHECK18: omp_offload.failed45: +// CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151(i64 [[TMP90]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] +// CHECK18-NEXT: br label [[OMP_OFFLOAD_CONT46]] +// CHECK18: omp_offload.cont46: +// CHECK18-NEXT: [[TMP115:%.*]] = load i32, i32* [[M]], align 4 +// CHECK18-NEXT: store i32 [[TMP115]], i32* [[DOTCAPTURE_EXPR_47]], align 4 +// CHECK18-NEXT: [[TMP116:%.*]] = load i32, i32* [[N]], align 4 +// CHECK18-NEXT: [[CONV49:%.*]] = bitcast i64* [[N_CASTED48]] to i32* +// CHECK18-NEXT: store i32 [[TMP116]], i32* [[CONV49]], align 4 +// CHECK18-NEXT: [[TMP117:%.*]] = load i64, i64* [[N_CASTED48]], align 8 +// CHECK18-NEXT: [[TMP118:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_47]], align 4 +// CHECK18-NEXT: [[CONV51:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED50]] to i32* +// CHECK18-NEXT: store i32 [[TMP118]], i32* [[CONV51]], align 4 +// CHECK18-NEXT: [[TMP119:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED50]], align 8 +// CHECK18-NEXT: [[TMP120:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK18-NEXT: [[TMP121:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS52]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP122:%.*]] = bitcast i8** [[TMP121]] to i64* +// CHECK18-NEXT: store i64 [[TMP117]], i64* [[TMP122]], align 8 +// CHECK18-NEXT: [[TMP123:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS53]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP124:%.*]] = bitcast i8** [[TMP123]] to i64* +// CHECK18-NEXT: store i64 [[TMP117]], i64* [[TMP124]], align 8 +// CHECK18-NEXT: [[TMP125:%.*]] = getelementptr inbounds [4 x i8*], [4 
x i8*]* [[DOTOFFLOAD_MAPPERS54]], i64 0, i64 0 +// CHECK18-NEXT: store i8* null, i8** [[TMP125]], align 8 +// CHECK18-NEXT: [[TMP126:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS52]], i32 0, i32 1 +// CHECK18-NEXT: [[TMP127:%.*]] = bitcast i8** [[TMP126]] to i64* +// CHECK18-NEXT: store i64 [[TMP1]], i64* [[TMP127]], align 8 +// CHECK18-NEXT: [[TMP128:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS53]], i32 0, i32 1 +// CHECK18-NEXT: [[TMP129:%.*]] = bitcast i8** [[TMP128]] to i64* +// CHECK18-NEXT: store i64 [[TMP1]], i64* [[TMP129]], align 8 +// CHECK18-NEXT: [[TMP130:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS54]], i64 0, i64 1 +// CHECK18-NEXT: store i8* null, i8** [[TMP130]], align 8 +// CHECK18-NEXT: [[TMP131:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS52]], i32 0, i32 2 +// CHECK18-NEXT: [[TMP132:%.*]] = bitcast i8** [[TMP131]] to i32** +// CHECK18-NEXT: store i32* [[VLA]], i32** [[TMP132]], align 8 +// CHECK18-NEXT: [[TMP133:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS53]], i32 0, i32 2 +// CHECK18-NEXT: [[TMP134:%.*]] = bitcast i8** [[TMP133]] to i32** +// CHECK18-NEXT: store i32* [[VLA]], i32** [[TMP134]], align 8 +// CHECK18-NEXT: store i64 [[TMP120]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.16, i32 0, i32 2), align 8 +// CHECK18-NEXT: [[TMP135:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS54]], i64 0, i64 2 +// CHECK18-NEXT: store i8* null, i8** [[TMP135]], align 8 +// CHECK18-NEXT: [[TMP136:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS52]], i32 0, i32 3 +// CHECK18-NEXT: [[TMP137:%.*]] = bitcast i8** [[TMP136]] to i64* +// CHECK18-NEXT: store i64 [[TMP119]], i64* [[TMP137]], align 8 +// CHECK18-NEXT: [[TMP138:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS53]], i32 0, i32 3 // CHECK18-NEXT: [[TMP139:%.*]] = bitcast i8** [[TMP138]] to i64* -// CHECK18-NEXT: store i64 [[TMP134]], i64* [[TMP139]], align 8 -// CHECK18-NEXT: [[TMP140:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS56]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP141:%.*]] = bitcast i8** [[TMP140]] to i64* -// CHECK18-NEXT: store i64 [[TMP134]], i64* [[TMP141]], align 8 -// CHECK18-NEXT: [[TMP142:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES58]], i32 0, i32 0 -// CHECK18-NEXT: store i64 4, i64* [[TMP142]], align 8 -// CHECK18-NEXT: [[TMP143:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS57]], i64 0, i64 0 -// CHECK18-NEXT: store i8* null, i8** [[TMP143]], align 8 -// CHECK18-NEXT: [[TMP144:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS55]], i32 0, i32 1 -// CHECK18-NEXT: [[TMP145:%.*]] = bitcast i8** [[TMP144]] to i64* -// CHECK18-NEXT: store i64 [[TMP1]], i64* [[TMP145]], align 8 -// CHECK18-NEXT: [[TMP146:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS56]], i32 0, i32 1 -// CHECK18-NEXT: [[TMP147:%.*]] = bitcast i8** [[TMP146]] to i64* -// CHECK18-NEXT: store i64 [[TMP1]], i64* [[TMP147]], align 8 -// CHECK18-NEXT: [[TMP148:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES58]], i32 0, i32 1 -// CHECK18-NEXT: store i64 8, i64* [[TMP148]], align 8 -// CHECK18-NEXT: [[TMP149:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS57]], i64 0, i64 1 -// CHECK18-NEXT: store i8* null, i8** [[TMP149]], align 8 -// CHECK18-NEXT: 
[[TMP150:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS55]], i32 0, i32 2 -// CHECK18-NEXT: [[TMP151:%.*]] = bitcast i8** [[TMP150]] to i32** -// CHECK18-NEXT: store i32* [[VLA]], i32** [[TMP151]], align 8 -// CHECK18-NEXT: [[TMP152:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS56]], i32 0, i32 2 -// CHECK18-NEXT: [[TMP153:%.*]] = bitcast i8** [[TMP152]] to i32** -// CHECK18-NEXT: store i32* [[VLA]], i32** [[TMP153]], align 8 -// CHECK18-NEXT: [[TMP154:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES58]], i32 0, i32 2 -// CHECK18-NEXT: store i64 [[TMP137]], i64* [[TMP154]], align 8 -// CHECK18-NEXT: [[TMP155:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS57]], i64 0, i64 2 -// CHECK18-NEXT: store i8* null, i8** [[TMP155]], align 8 -// CHECK18-NEXT: [[TMP156:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS55]], i32 0, i32 3 -// CHECK18-NEXT: [[TMP157:%.*]] = bitcast i8** [[TMP156]] to i64* -// CHECK18-NEXT: store i64 [[TMP136]], i64* [[TMP157]], align 8 -// CHECK18-NEXT: [[TMP158:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS56]], i32 0, i32 3 -// CHECK18-NEXT: [[TMP159:%.*]] = bitcast i8** [[TMP158]] to i64* -// CHECK18-NEXT: store i64 [[TMP136]], i64* [[TMP159]], align 8 -// CHECK18-NEXT: [[TMP160:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES58]], i32 0, i32 3 -// CHECK18-NEXT: store i64 4, i64* [[TMP160]], align 8 -// CHECK18-NEXT: [[TMP161:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS57]], i64 0, i64 3 -// CHECK18-NEXT: store i8* null, i8** [[TMP161]], align 8 -// CHECK18-NEXT: [[TMP162:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS55]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP163:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS56]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP164:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES58]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP165:%.*]] = load i32, i32* [[N]], align 4 -// CHECK18-NEXT: store i32 [[TMP165]], i32* [[DOTCAPTURE_EXPR_60]], align 4 -// CHECK18-NEXT: [[TMP166:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_60]], align 4 -// CHECK18-NEXT: [[SUB62:%.*]] = sub nsw i32 [[TMP166]], 0 -// CHECK18-NEXT: [[DIV63:%.*]] = sdiv i32 [[SUB62]], 1 -// CHECK18-NEXT: [[SUB64:%.*]] = sub nsw i32 [[DIV63]], 1 -// CHECK18-NEXT: store i32 [[SUB64]], i32* [[DOTCAPTURE_EXPR_61]], align 4 -// CHECK18-NEXT: [[TMP167:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_61]], align 4 -// CHECK18-NEXT: [[ADD65:%.*]] = add nsw i32 [[TMP167]], 1 -// CHECK18-NEXT: [[TMP168:%.*]] = zext i32 [[ADD65]] to i64 -// CHECK18-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP168]]) -// CHECK18-NEXT: [[TMP169:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155.region_id, i32 4, i8** [[TMP162]], i8** [[TMP163]], i64* [[TMP164]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK18-NEXT: [[TMP170:%.*]] = icmp ne i32 [[TMP169]], 0 -// CHECK18-NEXT: br i1 [[TMP170]], label [[OMP_OFFLOAD_FAILED66:%.*]], label [[OMP_OFFLOAD_CONT67:%.*]] -// CHECK18: omp_offload.failed66: -// CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155(i64 [[TMP134]], i64 [[TMP1]], i32* [[VLA]], i64 [[TMP136]]) #[[ATTR3]] 
-// CHECK18-NEXT: br label [[OMP_OFFLOAD_CONT67]] -// CHECK18: omp_offload.cont67: -// CHECK18-NEXT: [[TMP171:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 -// CHECK18-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP171]]) +// CHECK18-NEXT: store i64 [[TMP119]], i64* [[TMP139]], align 8 +// CHECK18-NEXT: [[TMP140:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS54]], i64 0, i64 3 +// CHECK18-NEXT: store i8* null, i8** [[TMP140]], align 8 +// CHECK18-NEXT: [[TMP141:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS52]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP142:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS53]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP143:%.*]] = load i32, i32* [[N]], align 4 +// CHECK18-NEXT: store i32 [[TMP143]], i32* [[DOTCAPTURE_EXPR_56]], align 4 +// CHECK18-NEXT: [[TMP144:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_56]], align 4 +// CHECK18-NEXT: [[SUB58:%.*]] = sub nsw i32 [[TMP144]], 0 +// CHECK18-NEXT: [[DIV59:%.*]] = sdiv i32 [[SUB58]], 1 +// CHECK18-NEXT: [[SUB60:%.*]] = sub nsw i32 [[DIV59]], 1 +// CHECK18-NEXT: store i32 [[SUB60]], i32* [[DOTCAPTURE_EXPR_57]], align 4 +// CHECK18-NEXT: [[TMP145:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_57]], align 4 +// CHECK18-NEXT: [[ADD61:%.*]] = add nsw i32 [[TMP145]], 1 +// CHECK18-NEXT: [[TMP146:%.*]] = zext i32 [[ADD61]] to i64 +// CHECK18-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP146]]) +// CHECK18-NEXT: [[TMP147:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155.region_id, i32 4, i8** [[TMP141]], i8** [[TMP142]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.16, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK18-NEXT: [[TMP148:%.*]] = icmp ne i32 [[TMP147]], 0 +// CHECK18-NEXT: br i1 [[TMP148]], label [[OMP_OFFLOAD_FAILED62:%.*]], label [[OMP_OFFLOAD_CONT63:%.*]] +// CHECK18: omp_offload.failed62: +// CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155(i64 [[TMP117]], i64 [[TMP1]], i32* [[VLA]], i64 [[TMP119]]) #[[ATTR3]] +// CHECK18-NEXT: br label [[OMP_OFFLOAD_CONT63]] +// CHECK18: omp_offload.cont63: +// CHECK18-NEXT: [[TMP149:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 +// CHECK18-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP149]]) // CHECK18-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK18-NEXT: [[TMP172:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK18-NEXT: call void @llvm.stackrestore(i8* [[TMP172]]) -// CHECK18-NEXT: [[TMP173:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK18-NEXT: ret i32 [[TMP173]] +// CHECK18-NEXT: [[TMP150:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK18-NEXT: call void @llvm.stackrestore(i8* [[TMP150]]) +// CHECK18-NEXT: [[TMP151:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK18-NEXT: ret i32 [[TMP151]] // // // CHECK18-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139 @@ -22972,11 +22738,11 @@ // CHECK18-NEXT: [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK18-NEXT: store i32 [[TMP4]], i32* [[CONV3]], align 4 // CHECK18-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, 
i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP5]]) +// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP5]]) // CHECK18-NEXT: ret void // // -// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..5 +// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..6 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK18-NEXT: entry: // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -23063,7 +22829,7 @@ // CHECK18-NEXT: [[CONV9:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK18-NEXT: store i32 [[TMP22]], i32* [[CONV9]], align 4, !llvm.access.group !28 // CHECK18-NEXT: [[TMP23:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !28 -// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*, i64)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i64 [[TMP17]], i64 [[TMP19]], i64 [[TMP21]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP23]]), !llvm.access.group !28 +// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*, i64)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i64 [[TMP17]], i64 [[TMP19]], i64 [[TMP21]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP23]]), !llvm.access.group !28 // CHECK18-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK18: omp.inner.for.inc: // CHECK18-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !28 @@ -23117,7 +22883,7 @@ // CHECK18-NEXT: ret void // // -// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..6 +// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..7 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK18-NEXT: entry: // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -23254,11 +23020,11 @@ // CHECK18-NEXT: [[CONV1:%.*]] = bitcast i64* [[N_CASTED]] to i32* // CHECK18-NEXT: store i32 [[TMP2]], i32* [[CONV1]], align 4 // CHECK18-NEXT: [[TMP3:%.*]] = load i64, i64* [[N_CASTED]], align 8 -// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*)* @.omp_outlined..8 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]]) +// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]]) // CHECK18-NEXT: ret void // // -// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..8 +// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..10 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] { // CHECK18-NEXT: entry: // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -23335,7 +23101,7 @@ // CHECK18-NEXT: [[CONV6:%.*]] = bitcast i64* [[N_CASTED]] to i32* // CHECK18-NEXT: store i32 [[TMP19]], i32* [[CONV6]], align 4, !llvm.access.group !34 // CHECK18-NEXT: [[TMP20:%.*]] = load i64, i64* [[N_CASTED]], align 8, !llvm.access.group !34 -// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), i64 [[TMP16]], i64 [[TMP18]], i64 [[TMP20]], i64 [[TMP0]], i32* [[TMP1]]), !llvm.access.group !34 +// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP16]], i64 [[TMP18]], i64 [[TMP20]], i64 [[TMP0]], i32* [[TMP1]]), !llvm.access.group !34 // CHECK18-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK18: omp.inner.for.inc: // CHECK18-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !34 @@ -23366,7 +23132,7 @@ // CHECK18-NEXT: ret void // // -// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..9 +// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..11 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] { // CHECK18-NEXT: entry: // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -23504,11 +23270,11 @@ // CHECK18-NEXT: [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK18-NEXT: store i32 [[TMP4]], i32* [[CONV3]], align 4 // CHECK18-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP5]]) +// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP5]]) // CHECK18-NEXT: ret void // // -// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..14 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK18-NEXT: entry: // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -23593,7 +23359,7 @@ // CHECK18-NEXT: [[CONV9:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK18-NEXT: store i32 [[TMP21]], i32* [[CONV9]], align 4, !llvm.access.group !40 // CHECK18-NEXT: [[TMP22:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !40 -// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*, i64)* @.omp_outlined..12 to void (i32*, i32*, ...)*), i64 [[TMP16]], i64 [[TMP18]], i64 [[TMP20]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP22]]), !llvm.access.group !40 +// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, i32*, i64)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i64 [[TMP16]], i64 [[TMP18]], i64 [[TMP20]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP22]]), !llvm.access.group !40 // CHECK18-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK18: omp.inner.for.inc: // CHECK18-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !40 @@ -23624,7 +23390,7 @@ // CHECK18-NEXT: ret void // // -// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..12 +// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..15 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK18-NEXT: entry: // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -23784,7 +23550,7 @@ // CHECK18-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK18-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK18-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK18-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l112.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK18-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l112.region_id, i32 1, 
i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.20, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.21, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK18-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK18-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK18: omp_offload.failed: @@ -23802,7 +23568,7 @@ // CHECK18-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0 // CHECK18-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0 // CHECK18-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK18-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.19, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.20, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK18-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.24, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.25, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK18-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 // CHECK18-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]] // CHECK18: omp_offload.failed5: @@ -23834,7 +23600,7 @@ // CHECK18-NEXT: [[TMP31:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0 // CHECK18-NEXT: [[TMP32:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0 // CHECK18-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK18-NEXT: [[TMP33:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l120.region_id, i32 2, i8** [[TMP31]], i8** [[TMP32]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.23, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.24, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK18-NEXT: [[TMP33:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l120.region_id, i32 2, i8** [[TMP31]], i8** [[TMP32]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.28, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.29, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK18-NEXT: [[TMP34:%.*]] = icmp ne i32 [[TMP33]], 0 // CHECK18-NEXT: br i1 [[TMP34]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]] // CHECK18: omp_offload.failed11: @@ -23852,7 +23618,7 @@ // CHECK18-NEXT: [[TMP40:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 0 // CHECK18-NEXT: [[TMP41:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 
0 // CHECK18-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK18-NEXT: [[TMP42:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l124.region_id, i32 1, i8** [[TMP40]], i8** [[TMP41]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.27, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.28, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK18-NEXT: [[TMP42:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l124.region_id, i32 1, i8** [[TMP40]], i8** [[TMP41]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.32, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.33, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK18-NEXT: [[TMP43:%.*]] = icmp ne i32 [[TMP42]], 0 // CHECK18-NEXT: br i1 [[TMP43]], label [[OMP_OFFLOAD_FAILED17:%.*]], label [[OMP_OFFLOAD_CONT18:%.*]] // CHECK18: omp_offload.failed17: @@ -23884,7 +23650,7 @@ // CHECK18-NEXT: [[TMP57:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 0 // CHECK18-NEXT: [[TMP58:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 0 // CHECK18-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK18-NEXT: [[TMP59:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l128.region_id, i32 2, i8** [[TMP57]], i8** [[TMP58]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.31, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.32, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK18-NEXT: [[TMP59:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l128.region_id, i32 2, i8** [[TMP57]], i8** [[TMP58]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.36, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.37, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK18-NEXT: [[TMP60:%.*]] = icmp ne i32 [[TMP59]], 0 // CHECK18-NEXT: br i1 [[TMP60]], label [[OMP_OFFLOAD_FAILED26:%.*]], label [[OMP_OFFLOAD_CONT27:%.*]] // CHECK18: omp_offload.failed26: @@ -23900,11 +23666,11 @@ // CHECK18-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK18-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 // CHECK18-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 -// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK18-NEXT: ret void // // -// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..14 +// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..18 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK18-NEXT: entry: // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -23952,7 +23718,7 @@ // CHECK18-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 // CHECK18-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !46 // CHECK18-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]), !llvm.access.group !46 +// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..19 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]), !llvm.access.group !46 // CHECK18-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK18: omp.inner.for.inc: // CHECK18-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !46 @@ -23974,7 +23740,7 @@ // CHECK18-NEXT: ret void // // -// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..15 +// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..19 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK18-NEXT: entry: // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -24064,11 +23830,11 @@ // CHECK18-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK18-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 // CHECK18-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 -// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..17 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..22 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK18-NEXT: ret void // // -// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..17 +// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..22 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK18-NEXT: entry: // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -24116,7 +23882,7 @@ // CHECK18-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 // CHECK18-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !52 // CHECK18-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]), !llvm.access.group !52 +// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..23 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]), !llvm.access.group !52 // CHECK18-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK18: omp.inner.for.inc: // CHECK18-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !52 @@ -24138,7 +23904,7 @@ // CHECK18-NEXT: ret void // // -// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..18 +// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..23 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK18-NEXT: entry: // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -24236,11 +24002,11 @@ // CHECK18-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK18-NEXT: store i32 [[TMP1]], i32* [[CONV1]], align 4 // CHECK18-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..21 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP2]]) +// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..26 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP2]]) // CHECK18-NEXT: ret void // // -// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..21 +// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..26 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK18-NEXT: entry: // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -24296,7 +24062,7 @@ // CHECK18-NEXT: [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK18-NEXT: store i32 [[TMP12]], i32* [[CONV2]], align 4, !llvm.access.group !58 // CHECK18-NEXT: [[TMP13:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !58 -// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..22 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]]), !llvm.access.group !58 +// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..27 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]]), !llvm.access.group !58 // CHECK18-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK18: omp.inner.for.inc: // CHECK18-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !58 @@ -24318,7 +24084,7 @@ // CHECK18-NEXT: ret void // // -// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..22 +// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..27 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK18-NEXT: entry: // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -24433,11 +24199,11 @@ // CHECK18-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK18-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 // CHECK18-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 -// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..25 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..30 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK18-NEXT: ret void // // -// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..25 +// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..30 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK18-NEXT: entry: // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -24485,7 +24251,7 @@ // CHECK18-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 // CHECK18-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !64 // CHECK18-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..26 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]), !llvm.access.group !64 +// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..31 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]), !llvm.access.group !64 // CHECK18-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK18: omp.inner.for.inc: // CHECK18-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !64 @@ -24507,7 +24273,7 @@ // CHECK18-NEXT: ret void // // -// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..26 +// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..31 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK18-NEXT: entry: // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -24603,11 +24369,11 @@ // CHECK18-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK18-NEXT: store i32 [[TMP1]], i32* [[CONV1]], align 4 // CHECK18-NEXT: [[TMP2:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..29 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP2]]) +// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..34 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP2]]) // CHECK18-NEXT: ret void // // -// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..29 +// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..34 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK18-NEXT: entry: // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -24663,7 +24429,7 @@ // CHECK18-NEXT: [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK18-NEXT: store i32 [[TMP12]], i32* [[CONV2]], align 4, !llvm.access.group !70 // CHECK18-NEXT: [[TMP13:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !70 -// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..30 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]]), !llvm.access.group !70 +// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..35 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]]), !llvm.access.group !70 // CHECK18-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK18: omp.inner.for.inc: // CHECK18-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !70 @@ -24685,7 +24451,7 @@ // CHECK18-NEXT: ret void // // -// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..30 +// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..35 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK18-NEXT: entry: // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -24792,7 +24558,6 @@ // CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK19-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK19-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4 -// CHECK19-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 4 // CHECK19-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK19-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK19-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -24800,38 +24565,34 @@ // CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS4:%.*]] = alloca [3 x i8*], align 4 // CHECK19-NEXT: [[DOTOFFLOAD_PTRS5:%.*]] = alloca [3 x i8*], align 4 // CHECK19-NEXT: [[DOTOFFLOAD_MAPPERS6:%.*]] = alloca [3 x i8*], align 4 -// CHECK19-NEXT: [[DOTOFFLOAD_SIZES7:%.*]] = alloca [3 x i64], align 4 -// CHECK19-NEXT: [[_TMP8:%.*]] = alloca i32, align 4 +// CHECK19-NEXT: [[_TMP7:%.*]] = alloca i32, align 4 +// CHECK19-NEXT: [[DOTCAPTURE_EXPR_8:%.*]] = alloca i32, align 4 // CHECK19-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4 -// CHECK19-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4 -// CHECK19-NEXT: 
[[DOTCAPTURE_EXPR_17:%.*]] = alloca i32, align 4 -// CHECK19-NEXT: [[N_CASTED18:%.*]] = alloca i32, align 4 +// CHECK19-NEXT: [[DOTCAPTURE_EXPR_16:%.*]] = alloca i32, align 4 +// CHECK19-NEXT: [[N_CASTED17:%.*]] = alloca i32, align 4 // CHECK19-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4 -// CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS19:%.*]] = alloca [4 x i8*], align 4 -// CHECK19-NEXT: [[DOTOFFLOAD_PTRS20:%.*]] = alloca [4 x i8*], align 4 -// CHECK19-NEXT: [[DOTOFFLOAD_MAPPERS21:%.*]] = alloca [4 x i8*], align 4 -// CHECK19-NEXT: [[DOTOFFLOAD_SIZES22:%.*]] = alloca [4 x i64], align 4 -// CHECK19-NEXT: [[_TMP23:%.*]] = alloca i32, align 4 -// CHECK19-NEXT: [[DOTCAPTURE_EXPR_24:%.*]] = alloca i32, align 4 -// CHECK19-NEXT: [[DOTCAPTURE_EXPR_25:%.*]] = alloca i32, align 4 -// CHECK19-NEXT: [[N_CASTED32:%.*]] = alloca i32, align 4 -// CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS33:%.*]] = alloca [3 x i8*], align 4 -// CHECK19-NEXT: [[DOTOFFLOAD_PTRS34:%.*]] = alloca [3 x i8*], align 4 -// CHECK19-NEXT: [[DOTOFFLOAD_MAPPERS35:%.*]] = alloca [3 x i8*], align 4 -// CHECK19-NEXT: [[DOTOFFLOAD_SIZES36:%.*]] = alloca [3 x i64], align 4 -// CHECK19-NEXT: [[_TMP37:%.*]] = alloca i32, align 4 -// CHECK19-NEXT: [[DOTCAPTURE_EXPR_38:%.*]] = alloca i32, align 4 -// CHECK19-NEXT: [[DOTCAPTURE_EXPR_39:%.*]] = alloca i32, align 4 -// CHECK19-NEXT: [[DOTCAPTURE_EXPR_46:%.*]] = alloca i32, align 4 -// CHECK19-NEXT: [[N_CASTED47:%.*]] = alloca i32, align 4 -// CHECK19-NEXT: [[DOTCAPTURE_EXPR__CASTED48:%.*]] = alloca i32, align 4 -// CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS49:%.*]] = alloca [4 x i8*], align 4 -// CHECK19-NEXT: [[DOTOFFLOAD_PTRS50:%.*]] = alloca [4 x i8*], align 4 -// CHECK19-NEXT: [[DOTOFFLOAD_MAPPERS51:%.*]] = alloca [4 x i8*], align 4 -// CHECK19-NEXT: [[DOTOFFLOAD_SIZES52:%.*]] = alloca [4 x i64], align 4 -// CHECK19-NEXT: [[_TMP53:%.*]] = alloca i32, align 4 -// CHECK19-NEXT: [[DOTCAPTURE_EXPR_54:%.*]] = alloca i32, align 4 -// CHECK19-NEXT: [[DOTCAPTURE_EXPR_55:%.*]] = alloca i32, align 4 +// CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS18:%.*]] = alloca [4 x i8*], align 4 +// CHECK19-NEXT: [[DOTOFFLOAD_PTRS19:%.*]] = alloca [4 x i8*], align 4 +// CHECK19-NEXT: [[DOTOFFLOAD_MAPPERS20:%.*]] = alloca [4 x i8*], align 4 +// CHECK19-NEXT: [[_TMP21:%.*]] = alloca i32, align 4 +// CHECK19-NEXT: [[DOTCAPTURE_EXPR_22:%.*]] = alloca i32, align 4 +// CHECK19-NEXT: [[DOTCAPTURE_EXPR_23:%.*]] = alloca i32, align 4 +// CHECK19-NEXT: [[N_CASTED30:%.*]] = alloca i32, align 4 +// CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS31:%.*]] = alloca [3 x i8*], align 4 +// CHECK19-NEXT: [[DOTOFFLOAD_PTRS32:%.*]] = alloca [3 x i8*], align 4 +// CHECK19-NEXT: [[DOTOFFLOAD_MAPPERS33:%.*]] = alloca [3 x i8*], align 4 +// CHECK19-NEXT: [[_TMP34:%.*]] = alloca i32, align 4 +// CHECK19-NEXT: [[DOTCAPTURE_EXPR_35:%.*]] = alloca i32, align 4 +// CHECK19-NEXT: [[DOTCAPTURE_EXPR_36:%.*]] = alloca i32, align 4 +// CHECK19-NEXT: [[DOTCAPTURE_EXPR_43:%.*]] = alloca i32, align 4 +// CHECK19-NEXT: [[N_CASTED44:%.*]] = alloca i32, align 4 +// CHECK19-NEXT: [[DOTCAPTURE_EXPR__CASTED45:%.*]] = alloca i32, align 4 +// CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS46:%.*]] = alloca [4 x i8*], align 4 +// CHECK19-NEXT: [[DOTOFFLOAD_PTRS47:%.*]] = alloca [4 x i8*], align 4 +// CHECK19-NEXT: [[DOTOFFLOAD_MAPPERS48:%.*]] = alloca [4 x i8*], align 4 +// CHECK19-NEXT: [[_TMP49:%.*]] = alloca i32, align 4 +// CHECK19-NEXT: [[DOTCAPTURE_EXPR_50:%.*]] = alloca i32, align 4 +// CHECK19-NEXT: [[DOTCAPTURE_EXPR_51:%.*]] = alloca i32, align 4 // CHECK19-NEXT: store i32 0, i32* 
[[RETVAL]], align 4 // CHECK19-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 // CHECK19-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 4 @@ -24853,312 +24614,278 @@ // CHECK19-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK19-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i32* // CHECK19-NEXT: store i32 [[TMP3]], i32* [[TMP9]], align 4 -// CHECK19-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK19-NEXT: store i64 4, i64* [[TMP10]], align 4 -// CHECK19-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK19-NEXT: store i8* null, i8** [[TMP11]], align 4 -// CHECK19-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK19-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32* -// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP13]], align 4 -// CHECK19-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK19-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32* -// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP15]], align 4 -// CHECK19-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK19-NEXT: store i64 4, i64* [[TMP16]], align 4 -// CHECK19-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK19-NEXT: store i8* null, i8** [[TMP17]], align 4 -// CHECK19-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK19-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK19-NEXT: store i8* null, i8** [[TMP10]], align 4 +// CHECK19-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK19-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i32* +// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP12]], align 4 +// CHECK19-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK19-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i32* +// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP14]], align 4 +// CHECK19-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK19-NEXT: store i8* null, i8** [[TMP15]], align 4 +// CHECK19-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK19-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** +// CHECK19-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 4 +// CHECK19-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK19-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** // CHECK19-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 4 -// CHECK19-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK19-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** -// CHECK19-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 4 -// CHECK19-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK19-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 4 -// CHECK19-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 
-// CHECK19-NEXT: store i8* null, i8** [[TMP23]], align 4 -// CHECK19-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4 -// CHECK19-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK19-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK19-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 +// CHECK19-NEXT: store i64 [[TMP5]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 2), align 4 +// CHECK19-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK19-NEXT: store i8* null, i8** [[TMP20]], align 4 +// CHECK19-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP23:%.*]] = load i32, i32* [[N]], align 4 +// CHECK19-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK19-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK19-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK19-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK19-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK19-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK19-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1 -// CHECK19-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 -// CHECK19-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP30]]) -// CHECK19-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK19-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 -// CHECK19-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK19-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], 1 +// CHECK19-NEXT: [[TMP26:%.*]] = zext i32 [[ADD]] to i64 +// CHECK19-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP26]]) +// CHECK19-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK19-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK19-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK19: omp_offload.failed: // CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139(i32 [[TMP3]], i32 [[TMP0]], i32* [[VLA]]) 
#[[ATTR3:[0-9]+]] // CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK19: omp_offload.cont: -// CHECK19-NEXT: [[TMP33:%.*]] = load i32, i32* [[N]], align 4 -// CHECK19-NEXT: store i32 [[TMP33]], i32* [[N_CASTED3]], align 4 -// CHECK19-NEXT: [[TMP34:%.*]] = load i32, i32* [[N_CASTED3]], align 4 -// CHECK19-NEXT: [[TMP35:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK19-NEXT: [[TMP36:%.*]] = sext i32 [[TMP35]] to i64 -// CHECK19-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i32* -// CHECK19-NEXT: store i32 [[TMP34]], i32* [[TMP38]], align 4 -// CHECK19-NEXT: [[TMP39:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i32* -// CHECK19-NEXT: store i32 [[TMP34]], i32* [[TMP40]], align 4 -// CHECK19-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 0 -// CHECK19-NEXT: store i64 4, i64* [[TMP41]], align 4 -// CHECK19-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP29:%.*]] = load i32, i32* [[N]], align 4 +// CHECK19-NEXT: store i32 [[TMP29]], i32* [[N_CASTED3]], align 4 +// CHECK19-NEXT: [[TMP30:%.*]] = load i32, i32* [[N_CASTED3]], align 4 +// CHECK19-NEXT: [[TMP31:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK19-NEXT: [[TMP32:%.*]] = sext i32 [[TMP31]] to i64 +// CHECK19-NEXT: [[TMP33:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i32* +// CHECK19-NEXT: store i32 [[TMP30]], i32* [[TMP34]], align 4 +// CHECK19-NEXT: [[TMP35:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP36:%.*]] = bitcast i8** [[TMP35]] to i32* +// CHECK19-NEXT: store i32 [[TMP30]], i32* [[TMP36]], align 4 +// CHECK19-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0 +// CHECK19-NEXT: store i8* null, i8** [[TMP37]], align 4 +// CHECK19-NEXT: [[TMP38:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1 +// CHECK19-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i32* +// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP39]], align 4 +// CHECK19-NEXT: [[TMP40:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 1 +// CHECK19-NEXT: [[TMP41:%.*]] = bitcast i8** [[TMP40]] to i32* +// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP41]], align 4 +// CHECK19-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1 // CHECK19-NEXT: store i8* null, i8** [[TMP42]], align 4 -// CHECK19-NEXT: [[TMP43:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1 -// CHECK19-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32* -// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP44]], align 4 -// CHECK19-NEXT: [[TMP45:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 1 -// CHECK19-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32* -// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP46]], align 4 -// CHECK19-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 1 -// CHECK19-NEXT: store i64 4, i64* [[TMP47]], align 4 -// CHECK19-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x 
i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1 -// CHECK19-NEXT: store i8* null, i8** [[TMP48]], align 4 -// CHECK19-NEXT: [[TMP49:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2 -// CHECK19-NEXT: [[TMP50:%.*]] = bitcast i8** [[TMP49]] to i32** -// CHECK19-NEXT: store i32* [[VLA]], i32** [[TMP50]], align 4 -// CHECK19-NEXT: [[TMP51:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 2 -// CHECK19-NEXT: [[TMP52:%.*]] = bitcast i8** [[TMP51]] to i32** -// CHECK19-NEXT: store i32* [[VLA]], i32** [[TMP52]], align 4 -// CHECK19-NEXT: [[TMP53:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 2 -// CHECK19-NEXT: store i64 [[TMP36]], i64* [[TMP53]], align 4 -// CHECK19-NEXT: [[TMP54:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 2 -// CHECK19-NEXT: store i8* null, i8** [[TMP54]], align 4 -// CHECK19-NEXT: [[TMP55:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP56:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP57:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP58:%.*]] = load i32, i32* [[N]], align 4 -// CHECK19-NEXT: store i32 [[TMP58]], i32* [[DOTCAPTURE_EXPR_9]], align 4 -// CHECK19-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 -// CHECK19-NEXT: [[SUB11:%.*]] = sub nsw i32 [[TMP59]], 0 -// CHECK19-NEXT: [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1 -// CHECK19-NEXT: [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1 -// CHECK19-NEXT: store i32 [[SUB13]], i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK19-NEXT: [[TMP60:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK19-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP60]], 1 -// CHECK19-NEXT: [[TMP61:%.*]] = zext i32 [[ADD14]] to i64 -// CHECK19-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP61]]) -// CHECK19-NEXT: [[TMP62:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143.region_id, i32 3, i8** [[TMP55]], i8** [[TMP56]], i64* [[TMP57]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK19-NEXT: [[TMP63:%.*]] = icmp ne i32 [[TMP62]], 0 -// CHECK19-NEXT: br i1 [[TMP63]], label [[OMP_OFFLOAD_FAILED15:%.*]], label [[OMP_OFFLOAD_CONT16:%.*]] -// CHECK19: omp_offload.failed15: -// CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143(i32 [[TMP34]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] -// CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT16]] -// CHECK19: omp_offload.cont16: -// CHECK19-NEXT: [[TMP64:%.*]] = load i32, i32* [[M]], align 4 -// CHECK19-NEXT: store i32 [[TMP64]], i32* [[DOTCAPTURE_EXPR_17]], align 4 -// CHECK19-NEXT: [[TMP65:%.*]] = load i32, i32* [[N]], align 4 -// CHECK19-NEXT: store i32 [[TMP65]], i32* [[N_CASTED18]], align 4 -// CHECK19-NEXT: [[TMP66:%.*]] = load i32, i32* [[N_CASTED18]], align 4 -// CHECK19-NEXT: [[TMP67:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_17]], align 4 -// CHECK19-NEXT: store i32 [[TMP67]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK19-NEXT: [[TMP68:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK19-NEXT: [[TMP69:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK19-NEXT: [[TMP70:%.*]] = sext i32 
[[TMP69]] to i64 -// CHECK19-NEXT: [[TMP71:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP72:%.*]] = bitcast i8** [[TMP71]] to i32* -// CHECK19-NEXT: store i32 [[TMP66]], i32* [[TMP72]], align 4 -// CHECK19-NEXT: [[TMP73:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP74:%.*]] = bitcast i8** [[TMP73]] to i32* -// CHECK19-NEXT: store i32 [[TMP66]], i32* [[TMP74]], align 4 -// CHECK19-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 0 -// CHECK19-NEXT: store i64 4, i64* [[TMP75]], align 4 -// CHECK19-NEXT: [[TMP76:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 0 -// CHECK19-NEXT: store i8* null, i8** [[TMP76]], align 4 -// CHECK19-NEXT: [[TMP77:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 1 -// CHECK19-NEXT: [[TMP78:%.*]] = bitcast i8** [[TMP77]] to i32* -// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP78]], align 4 -// CHECK19-NEXT: [[TMP79:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 1 -// CHECK19-NEXT: [[TMP80:%.*]] = bitcast i8** [[TMP79]] to i32* -// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP80]], align 4 -// CHECK19-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 1 -// CHECK19-NEXT: store i64 4, i64* [[TMP81]], align 4 -// CHECK19-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 1 +// CHECK19-NEXT: [[TMP43:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2 +// CHECK19-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32** +// CHECK19-NEXT: store i32* [[VLA]], i32** [[TMP44]], align 4 +// CHECK19-NEXT: [[TMP45:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 2 +// CHECK19-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32** +// CHECK19-NEXT: store i32* [[VLA]], i32** [[TMP46]], align 4 +// CHECK19-NEXT: store i64 [[TMP32]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 2), align 4 +// CHECK19-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 2 +// CHECK19-NEXT: store i8* null, i8** [[TMP47]], align 4 +// CHECK19-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP49:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP50:%.*]] = load i32, i32* [[N]], align 4 +// CHECK19-NEXT: store i32 [[TMP50]], i32* [[DOTCAPTURE_EXPR_8]], align 4 +// CHECK19-NEXT: [[TMP51:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_8]], align 4 +// CHECK19-NEXT: [[SUB10:%.*]] = sub nsw i32 [[TMP51]], 0 +// CHECK19-NEXT: [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1 +// CHECK19-NEXT: [[SUB12:%.*]] = sub nsw i32 [[DIV11]], 1 +// CHECK19-NEXT: store i32 [[SUB12]], i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK19-NEXT: [[TMP52:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK19-NEXT: [[ADD13:%.*]] = add nsw i32 [[TMP52]], 1 +// CHECK19-NEXT: [[TMP53:%.*]] = zext i32 [[ADD13]] to i64 +// CHECK19-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP53]]) +// CHECK19-NEXT: [[TMP54:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* 
@.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143.region_id, i32 3, i8** [[TMP48]], i8** [[TMP49]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK19-NEXT: [[TMP55:%.*]] = icmp ne i32 [[TMP54]], 0 +// CHECK19-NEXT: br i1 [[TMP55]], label [[OMP_OFFLOAD_FAILED14:%.*]], label [[OMP_OFFLOAD_CONT15:%.*]] +// CHECK19: omp_offload.failed14: +// CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143(i32 [[TMP30]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] +// CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT15]] +// CHECK19: omp_offload.cont15: +// CHECK19-NEXT: [[TMP56:%.*]] = load i32, i32* [[M]], align 4 +// CHECK19-NEXT: store i32 [[TMP56]], i32* [[DOTCAPTURE_EXPR_16]], align 4 +// CHECK19-NEXT: [[TMP57:%.*]] = load i32, i32* [[N]], align 4 +// CHECK19-NEXT: store i32 [[TMP57]], i32* [[N_CASTED17]], align 4 +// CHECK19-NEXT: [[TMP58:%.*]] = load i32, i32* [[N_CASTED17]], align 4 +// CHECK19-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_16]], align 4 +// CHECK19-NEXT: store i32 [[TMP59]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 +// CHECK19-NEXT: [[TMP60:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 +// CHECK19-NEXT: [[TMP61:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK19-NEXT: [[TMP62:%.*]] = sext i32 [[TMP61]] to i64 +// CHECK19-NEXT: [[TMP63:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP64:%.*]] = bitcast i8** [[TMP63]] to i32* +// CHECK19-NEXT: store i32 [[TMP58]], i32* [[TMP64]], align 4 +// CHECK19-NEXT: [[TMP65:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP66:%.*]] = bitcast i8** [[TMP65]] to i32* +// CHECK19-NEXT: store i32 [[TMP58]], i32* [[TMP66]], align 4 +// CHECK19-NEXT: [[TMP67:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 0 +// CHECK19-NEXT: store i8* null, i8** [[TMP67]], align 4 +// CHECK19-NEXT: [[TMP68:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 1 +// CHECK19-NEXT: [[TMP69:%.*]] = bitcast i8** [[TMP68]] to i32* +// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP69]], align 4 +// CHECK19-NEXT: [[TMP70:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 1 +// CHECK19-NEXT: [[TMP71:%.*]] = bitcast i8** [[TMP70]] to i32* +// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP71]], align 4 +// CHECK19-NEXT: [[TMP72:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 1 +// CHECK19-NEXT: store i8* null, i8** [[TMP72]], align 4 +// CHECK19-NEXT: [[TMP73:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 2 +// CHECK19-NEXT: [[TMP74:%.*]] = bitcast i8** [[TMP73]] to i32** +// CHECK19-NEXT: store i32* [[VLA]], i32** [[TMP74]], align 4 +// CHECK19-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 2 +// CHECK19-NEXT: [[TMP76:%.*]] = bitcast i8** [[TMP75]] to i32** +// CHECK19-NEXT: store i32* [[VLA]], i32** [[TMP76]], align 4 +// CHECK19-NEXT: store i64 [[TMP62]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 2), align 4 +// CHECK19-NEXT: [[TMP77:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 2 +// CHECK19-NEXT: store i8* null, i8** 
[[TMP77]], align 4 +// CHECK19-NEXT: [[TMP78:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 3 +// CHECK19-NEXT: [[TMP79:%.*]] = bitcast i8** [[TMP78]] to i32* +// CHECK19-NEXT: store i32 [[TMP60]], i32* [[TMP79]], align 4 +// CHECK19-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 3 +// CHECK19-NEXT: [[TMP81:%.*]] = bitcast i8** [[TMP80]] to i32* +// CHECK19-NEXT: store i32 [[TMP60]], i32* [[TMP81]], align 4 +// CHECK19-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 3 // CHECK19-NEXT: store i8* null, i8** [[TMP82]], align 4 -// CHECK19-NEXT: [[TMP83:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 2 -// CHECK19-NEXT: [[TMP84:%.*]] = bitcast i8** [[TMP83]] to i32** -// CHECK19-NEXT: store i32* [[VLA]], i32** [[TMP84]], align 4 -// CHECK19-NEXT: [[TMP85:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 2 -// CHECK19-NEXT: [[TMP86:%.*]] = bitcast i8** [[TMP85]] to i32** -// CHECK19-NEXT: store i32* [[VLA]], i32** [[TMP86]], align 4 -// CHECK19-NEXT: [[TMP87:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 2 -// CHECK19-NEXT: store i64 [[TMP70]], i64* [[TMP87]], align 4 -// CHECK19-NEXT: [[TMP88:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 2 -// CHECK19-NEXT: store i8* null, i8** [[TMP88]], align 4 -// CHECK19-NEXT: [[TMP89:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 3 -// CHECK19-NEXT: [[TMP90:%.*]] = bitcast i8** [[TMP89]] to i32* -// CHECK19-NEXT: store i32 [[TMP68]], i32* [[TMP90]], align 4 -// CHECK19-NEXT: [[TMP91:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 3 -// CHECK19-NEXT: [[TMP92:%.*]] = bitcast i8** [[TMP91]] to i32* -// CHECK19-NEXT: store i32 [[TMP68]], i32* [[TMP92]], align 4 -// CHECK19-NEXT: [[TMP93:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 3 -// CHECK19-NEXT: store i64 4, i64* [[TMP93]], align 4 -// CHECK19-NEXT: [[TMP94:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 3 -// CHECK19-NEXT: store i8* null, i8** [[TMP94]], align 4 -// CHECK19-NEXT: [[TMP95:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP96:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP97:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP98:%.*]] = load i32, i32* [[N]], align 4 -// CHECK19-NEXT: store i32 [[TMP98]], i32* [[DOTCAPTURE_EXPR_24]], align 4 -// CHECK19-NEXT: [[TMP99:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_24]], align 4 -// CHECK19-NEXT: [[SUB26:%.*]] = sub nsw i32 [[TMP99]], 0 -// CHECK19-NEXT: [[DIV27:%.*]] = sdiv i32 [[SUB26]], 1 -// CHECK19-NEXT: [[SUB28:%.*]] = sub nsw i32 [[DIV27]], 1 -// CHECK19-NEXT: store i32 [[SUB28]], i32* [[DOTCAPTURE_EXPR_25]], align 4 -// CHECK19-NEXT: [[TMP100:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_25]], align 4 -// CHECK19-NEXT: [[ADD29:%.*]] = add nsw i32 [[TMP100]], 1 -// CHECK19-NEXT: [[TMP101:%.*]] = zext i32 [[ADD29]] to i64 -// CHECK19-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP101]]) -// CHECK19-NEXT: [[TMP102:%.*]] = call i32 
@__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147.region_id, i32 4, i8** [[TMP95]], i8** [[TMP96]], i64* [[TMP97]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.7, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK19-NEXT: [[TMP103:%.*]] = icmp ne i32 [[TMP102]], 0 -// CHECK19-NEXT: br i1 [[TMP103]], label [[OMP_OFFLOAD_FAILED30:%.*]], label [[OMP_OFFLOAD_CONT31:%.*]] -// CHECK19: omp_offload.failed30: -// CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147(i32 [[TMP66]], i32 [[TMP0]], i32* [[VLA]], i32 [[TMP68]]) #[[ATTR3]] -// CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT31]] -// CHECK19: omp_offload.cont31: -// CHECK19-NEXT: [[TMP104:%.*]] = load i32, i32* [[N]], align 4 -// CHECK19-NEXT: store i32 [[TMP104]], i32* [[N_CASTED32]], align 4 -// CHECK19-NEXT: [[TMP105:%.*]] = load i32, i32* [[N_CASTED32]], align 4 -// CHECK19-NEXT: [[TMP106:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK19-NEXT: [[TMP107:%.*]] = sext i32 [[TMP106]] to i64 -// CHECK19-NEXT: [[TMP108:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP109:%.*]] = bitcast i8** [[TMP108]] to i32* -// CHECK19-NEXT: store i32 [[TMP105]], i32* [[TMP109]], align 4 -// CHECK19-NEXT: [[TMP110:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP111:%.*]] = bitcast i8** [[TMP110]] to i32* -// CHECK19-NEXT: store i32 [[TMP105]], i32* [[TMP111]], align 4 -// CHECK19-NEXT: [[TMP112:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES36]], i32 0, i32 0 -// CHECK19-NEXT: store i64 4, i64* [[TMP112]], align 4 -// CHECK19-NEXT: [[TMP113:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS35]], i32 0, i32 0 -// CHECK19-NEXT: store i8* null, i8** [[TMP113]], align 4 -// CHECK19-NEXT: [[TMP114:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 1 -// CHECK19-NEXT: [[TMP115:%.*]] = bitcast i8** [[TMP114]] to i32* -// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP115]], align 4 -// CHECK19-NEXT: [[TMP116:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 1 -// CHECK19-NEXT: [[TMP117:%.*]] = bitcast i8** [[TMP116]] to i32* -// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP117]], align 4 -// CHECK19-NEXT: [[TMP118:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES36]], i32 0, i32 1 -// CHECK19-NEXT: store i64 4, i64* [[TMP118]], align 4 -// CHECK19-NEXT: [[TMP119:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS35]], i32 0, i32 1 -// CHECK19-NEXT: store i8* null, i8** [[TMP119]], align 4 -// CHECK19-NEXT: [[TMP120:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 2 -// CHECK19-NEXT: [[TMP121:%.*]] = bitcast i8** [[TMP120]] to i32** -// CHECK19-NEXT: store i32* [[VLA]], i32** [[TMP121]], align 4 -// CHECK19-NEXT: [[TMP122:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 2 -// CHECK19-NEXT: [[TMP123:%.*]] = bitcast i8** [[TMP122]] to i32** -// CHECK19-NEXT: store i32* [[VLA]], i32** [[TMP123]], align 4 -// CHECK19-NEXT: [[TMP124:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES36]], i32 0, i32 2 -// CHECK19-NEXT: store i64 [[TMP107]], i64* [[TMP124]], align 4 -// CHECK19-NEXT: [[TMP125:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* 
[[DOTOFFLOAD_MAPPERS35]], i32 0, i32 2 -// CHECK19-NEXT: store i8* null, i8** [[TMP125]], align 4 -// CHECK19-NEXT: [[TMP126:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP127:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP128:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES36]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP129:%.*]] = load i32, i32* [[N]], align 4 -// CHECK19-NEXT: store i32 [[TMP129]], i32* [[DOTCAPTURE_EXPR_38]], align 4 -// CHECK19-NEXT: [[TMP130:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_38]], align 4 -// CHECK19-NEXT: [[SUB40:%.*]] = sub nsw i32 [[TMP130]], 0 -// CHECK19-NEXT: [[DIV41:%.*]] = sdiv i32 [[SUB40]], 1 -// CHECK19-NEXT: [[SUB42:%.*]] = sub nsw i32 [[DIV41]], 1 -// CHECK19-NEXT: store i32 [[SUB42]], i32* [[DOTCAPTURE_EXPR_39]], align 4 -// CHECK19-NEXT: [[TMP131:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_39]], align 4 -// CHECK19-NEXT: [[ADD43:%.*]] = add nsw i32 [[TMP131]], 1 -// CHECK19-NEXT: [[TMP132:%.*]] = zext i32 [[ADD43]] to i64 -// CHECK19-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP132]]) -// CHECK19-NEXT: [[TMP133:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151.region_id, i32 3, i8** [[TMP126]], i8** [[TMP127]], i64* [[TMP128]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK19-NEXT: [[TMP134:%.*]] = icmp ne i32 [[TMP133]], 0 -// CHECK19-NEXT: br i1 [[TMP134]], label [[OMP_OFFLOAD_FAILED44:%.*]], label [[OMP_OFFLOAD_CONT45:%.*]] -// CHECK19: omp_offload.failed44: -// CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151(i32 [[TMP105]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] -// CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT45]] -// CHECK19: omp_offload.cont45: -// CHECK19-NEXT: [[TMP135:%.*]] = load i32, i32* [[M]], align 4 -// CHECK19-NEXT: store i32 [[TMP135]], i32* [[DOTCAPTURE_EXPR_46]], align 4 -// CHECK19-NEXT: [[TMP136:%.*]] = load i32, i32* [[N]], align 4 -// CHECK19-NEXT: store i32 [[TMP136]], i32* [[N_CASTED47]], align 4 -// CHECK19-NEXT: [[TMP137:%.*]] = load i32, i32* [[N_CASTED47]], align 4 -// CHECK19-NEXT: [[TMP138:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_46]], align 4 -// CHECK19-NEXT: store i32 [[TMP138]], i32* [[DOTCAPTURE_EXPR__CASTED48]], align 4 -// CHECK19-NEXT: [[TMP139:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED48]], align 4 -// CHECK19-NEXT: [[TMP140:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK19-NEXT: [[TMP141:%.*]] = sext i32 [[TMP140]] to i64 -// CHECK19-NEXT: [[TMP142:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS49]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP83:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP84:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP85:%.*]] = load i32, i32* [[N]], align 4 +// CHECK19-NEXT: store i32 [[TMP85]], i32* [[DOTCAPTURE_EXPR_22]], align 4 +// CHECK19-NEXT: [[TMP86:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_22]], align 4 +// CHECK19-NEXT: [[SUB24:%.*]] = sub nsw i32 [[TMP86]], 0 +// CHECK19-NEXT: [[DIV25:%.*]] = sdiv i32 [[SUB24]], 1 +// CHECK19-NEXT: [[SUB26:%.*]] = sub nsw i32 [[DIV25]], 1 +// CHECK19-NEXT: store i32 [[SUB26]], i32* 
[[DOTCAPTURE_EXPR_23]], align 4 +// CHECK19-NEXT: [[TMP87:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_23]], align 4 +// CHECK19-NEXT: [[ADD27:%.*]] = add nsw i32 [[TMP87]], 1 +// CHECK19-NEXT: [[TMP88:%.*]] = zext i32 [[ADD27]] to i64 +// CHECK19-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP88]]) +// CHECK19-NEXT: [[TMP89:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147.region_id, i32 4, i8** [[TMP83]], i8** [[TMP84]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK19-NEXT: [[TMP90:%.*]] = icmp ne i32 [[TMP89]], 0 +// CHECK19-NEXT: br i1 [[TMP90]], label [[OMP_OFFLOAD_FAILED28:%.*]], label [[OMP_OFFLOAD_CONT29:%.*]] +// CHECK19: omp_offload.failed28: +// CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147(i32 [[TMP58]], i32 [[TMP0]], i32* [[VLA]], i32 [[TMP60]]) #[[ATTR3]] +// CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT29]] +// CHECK19: omp_offload.cont29: +// CHECK19-NEXT: [[TMP91:%.*]] = load i32, i32* [[N]], align 4 +// CHECK19-NEXT: store i32 [[TMP91]], i32* [[N_CASTED30]], align 4 +// CHECK19-NEXT: [[TMP92:%.*]] = load i32, i32* [[N_CASTED30]], align 4 +// CHECK19-NEXT: [[TMP93:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK19-NEXT: [[TMP94:%.*]] = sext i32 [[TMP93]] to i64 +// CHECK19-NEXT: [[TMP95:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS31]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP96:%.*]] = bitcast i8** [[TMP95]] to i32* +// CHECK19-NEXT: store i32 [[TMP92]], i32* [[TMP96]], align 4 +// CHECK19-NEXT: [[TMP97:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS32]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP98:%.*]] = bitcast i8** [[TMP97]] to i32* +// CHECK19-NEXT: store i32 [[TMP92]], i32* [[TMP98]], align 4 +// CHECK19-NEXT: [[TMP99:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS33]], i32 0, i32 0 +// CHECK19-NEXT: store i8* null, i8** [[TMP99]], align 4 +// CHECK19-NEXT: [[TMP100:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS31]], i32 0, i32 1 +// CHECK19-NEXT: [[TMP101:%.*]] = bitcast i8** [[TMP100]] to i32* +// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP101]], align 4 +// CHECK19-NEXT: [[TMP102:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS32]], i32 0, i32 1 +// CHECK19-NEXT: [[TMP103:%.*]] = bitcast i8** [[TMP102]] to i32* +// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP103]], align 4 +// CHECK19-NEXT: [[TMP104:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS33]], i32 0, i32 1 +// CHECK19-NEXT: store i8* null, i8** [[TMP104]], align 4 +// CHECK19-NEXT: [[TMP105:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS31]], i32 0, i32 2 +// CHECK19-NEXT: [[TMP106:%.*]] = bitcast i8** [[TMP105]] to i32** +// CHECK19-NEXT: store i32* [[VLA]], i32** [[TMP106]], align 4 +// CHECK19-NEXT: [[TMP107:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS32]], i32 0, i32 2 +// CHECK19-NEXT: [[TMP108:%.*]] = bitcast i8** [[TMP107]] to i32** +// CHECK19-NEXT: store i32* [[VLA]], i32** [[TMP108]], align 4 +// CHECK19-NEXT: store i64 [[TMP94]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.12, i32 0, i32 2), align 4 +// CHECK19-NEXT: [[TMP109:%.*]] = getelementptr 
inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS33]], i32 0, i32 2 +// CHECK19-NEXT: store i8* null, i8** [[TMP109]], align 4 +// CHECK19-NEXT: [[TMP110:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS31]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP111:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS32]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP112:%.*]] = load i32, i32* [[N]], align 4 +// CHECK19-NEXT: store i32 [[TMP112]], i32* [[DOTCAPTURE_EXPR_35]], align 4 +// CHECK19-NEXT: [[TMP113:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_35]], align 4 +// CHECK19-NEXT: [[SUB37:%.*]] = sub nsw i32 [[TMP113]], 0 +// CHECK19-NEXT: [[DIV38:%.*]] = sdiv i32 [[SUB37]], 1 +// CHECK19-NEXT: [[SUB39:%.*]] = sub nsw i32 [[DIV38]], 1 +// CHECK19-NEXT: store i32 [[SUB39]], i32* [[DOTCAPTURE_EXPR_36]], align 4 +// CHECK19-NEXT: [[TMP114:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_36]], align 4 +// CHECK19-NEXT: [[ADD40:%.*]] = add nsw i32 [[TMP114]], 1 +// CHECK19-NEXT: [[TMP115:%.*]] = zext i32 [[ADD40]] to i64 +// CHECK19-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP115]]) +// CHECK19-NEXT: [[TMP116:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151.region_id, i32 3, i8** [[TMP110]], i8** [[TMP111]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK19-NEXT: [[TMP117:%.*]] = icmp ne i32 [[TMP116]], 0 +// CHECK19-NEXT: br i1 [[TMP117]], label [[OMP_OFFLOAD_FAILED41:%.*]], label [[OMP_OFFLOAD_CONT42:%.*]] +// CHECK19: omp_offload.failed41: +// CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151(i32 [[TMP92]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] +// CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT42]] +// CHECK19: omp_offload.cont42: +// CHECK19-NEXT: [[TMP118:%.*]] = load i32, i32* [[M]], align 4 +// CHECK19-NEXT: store i32 [[TMP118]], i32* [[DOTCAPTURE_EXPR_43]], align 4 +// CHECK19-NEXT: [[TMP119:%.*]] = load i32, i32* [[N]], align 4 +// CHECK19-NEXT: store i32 [[TMP119]], i32* [[N_CASTED44]], align 4 +// CHECK19-NEXT: [[TMP120:%.*]] = load i32, i32* [[N_CASTED44]], align 4 +// CHECK19-NEXT: [[TMP121:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_43]], align 4 +// CHECK19-NEXT: store i32 [[TMP121]], i32* [[DOTCAPTURE_EXPR__CASTED45]], align 4 +// CHECK19-NEXT: [[TMP122:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED45]], align 4 +// CHECK19-NEXT: [[TMP123:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK19-NEXT: [[TMP124:%.*]] = sext i32 [[TMP123]] to i64 +// CHECK19-NEXT: [[TMP125:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS46]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP126:%.*]] = bitcast i8** [[TMP125]] to i32* +// CHECK19-NEXT: store i32 [[TMP120]], i32* [[TMP126]], align 4 +// CHECK19-NEXT: [[TMP127:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS47]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP128:%.*]] = bitcast i8** [[TMP127]] to i32* +// CHECK19-NEXT: store i32 [[TMP120]], i32* [[TMP128]], align 4 +// CHECK19-NEXT: [[TMP129:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS48]], i32 0, i32 0 +// CHECK19-NEXT: store i8* null, i8** [[TMP129]], align 4 +// CHECK19-NEXT: [[TMP130:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS46]], i32 0, i32 1 +// CHECK19-NEXT: 
[[TMP131:%.*]] = bitcast i8** [[TMP130]] to i32* +// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP131]], align 4 +// CHECK19-NEXT: [[TMP132:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS47]], i32 0, i32 1 +// CHECK19-NEXT: [[TMP133:%.*]] = bitcast i8** [[TMP132]] to i32* +// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP133]], align 4 +// CHECK19-NEXT: [[TMP134:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS48]], i32 0, i32 1 +// CHECK19-NEXT: store i8* null, i8** [[TMP134]], align 4 +// CHECK19-NEXT: [[TMP135:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS46]], i32 0, i32 2 +// CHECK19-NEXT: [[TMP136:%.*]] = bitcast i8** [[TMP135]] to i32** +// CHECK19-NEXT: store i32* [[VLA]], i32** [[TMP136]], align 4 +// CHECK19-NEXT: [[TMP137:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS47]], i32 0, i32 2 +// CHECK19-NEXT: [[TMP138:%.*]] = bitcast i8** [[TMP137]] to i32** +// CHECK19-NEXT: store i32* [[VLA]], i32** [[TMP138]], align 4 +// CHECK19-NEXT: store i64 [[TMP124]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.16, i32 0, i32 2), align 4 +// CHECK19-NEXT: [[TMP139:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS48]], i32 0, i32 2 +// CHECK19-NEXT: store i8* null, i8** [[TMP139]], align 4 +// CHECK19-NEXT: [[TMP140:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS46]], i32 0, i32 3 +// CHECK19-NEXT: [[TMP141:%.*]] = bitcast i8** [[TMP140]] to i32* +// CHECK19-NEXT: store i32 [[TMP122]], i32* [[TMP141]], align 4 +// CHECK19-NEXT: [[TMP142:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS47]], i32 0, i32 3 // CHECK19-NEXT: [[TMP143:%.*]] = bitcast i8** [[TMP142]] to i32* -// CHECK19-NEXT: store i32 [[TMP137]], i32* [[TMP143]], align 4 -// CHECK19-NEXT: [[TMP144:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS50]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP145:%.*]] = bitcast i8** [[TMP144]] to i32* -// CHECK19-NEXT: store i32 [[TMP137]], i32* [[TMP145]], align 4 -// CHECK19-NEXT: [[TMP146:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES52]], i32 0, i32 0 -// CHECK19-NEXT: store i64 4, i64* [[TMP146]], align 4 -// CHECK19-NEXT: [[TMP147:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS51]], i32 0, i32 0 -// CHECK19-NEXT: store i8* null, i8** [[TMP147]], align 4 -// CHECK19-NEXT: [[TMP148:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS49]], i32 0, i32 1 -// CHECK19-NEXT: [[TMP149:%.*]] = bitcast i8** [[TMP148]] to i32* -// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP149]], align 4 -// CHECK19-NEXT: [[TMP150:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS50]], i32 0, i32 1 -// CHECK19-NEXT: [[TMP151:%.*]] = bitcast i8** [[TMP150]] to i32* -// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP151]], align 4 -// CHECK19-NEXT: [[TMP152:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES52]], i32 0, i32 1 -// CHECK19-NEXT: store i64 4, i64* [[TMP152]], align 4 -// CHECK19-NEXT: [[TMP153:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS51]], i32 0, i32 1 -// CHECK19-NEXT: store i8* null, i8** [[TMP153]], align 4 -// CHECK19-NEXT: [[TMP154:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS49]], i32 0, i32 2 -// CHECK19-NEXT: [[TMP155:%.*]] = bitcast i8** [[TMP154]] to i32** -// CHECK19-NEXT: store i32* [[VLA]], i32** [[TMP155]], align 4 -// 
CHECK19-NEXT: [[TMP156:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS50]], i32 0, i32 2 -// CHECK19-NEXT: [[TMP157:%.*]] = bitcast i8** [[TMP156]] to i32** -// CHECK19-NEXT: store i32* [[VLA]], i32** [[TMP157]], align 4 -// CHECK19-NEXT: [[TMP158:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES52]], i32 0, i32 2 -// CHECK19-NEXT: store i64 [[TMP141]], i64* [[TMP158]], align 4 -// CHECK19-NEXT: [[TMP159:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS51]], i32 0, i32 2 -// CHECK19-NEXT: store i8* null, i8** [[TMP159]], align 4 -// CHECK19-NEXT: [[TMP160:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS49]], i32 0, i32 3 -// CHECK19-NEXT: [[TMP161:%.*]] = bitcast i8** [[TMP160]] to i32* -// CHECK19-NEXT: store i32 [[TMP139]], i32* [[TMP161]], align 4 -// CHECK19-NEXT: [[TMP162:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS50]], i32 0, i32 3 -// CHECK19-NEXT: [[TMP163:%.*]] = bitcast i8** [[TMP162]] to i32* -// CHECK19-NEXT: store i32 [[TMP139]], i32* [[TMP163]], align 4 -// CHECK19-NEXT: [[TMP164:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES52]], i32 0, i32 3 -// CHECK19-NEXT: store i64 4, i64* [[TMP164]], align 4 -// CHECK19-NEXT: [[TMP165:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS51]], i32 0, i32 3 -// CHECK19-NEXT: store i8* null, i8** [[TMP165]], align 4 -// CHECK19-NEXT: [[TMP166:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS49]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP167:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS50]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP168:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES52]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP169:%.*]] = load i32, i32* [[N]], align 4 -// CHECK19-NEXT: store i32 [[TMP169]], i32* [[DOTCAPTURE_EXPR_54]], align 4 -// CHECK19-NEXT: [[TMP170:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_54]], align 4 -// CHECK19-NEXT: [[SUB56:%.*]] = sub nsw i32 [[TMP170]], 0 -// CHECK19-NEXT: [[DIV57:%.*]] = sdiv i32 [[SUB56]], 1 -// CHECK19-NEXT: [[SUB58:%.*]] = sub nsw i32 [[DIV57]], 1 -// CHECK19-NEXT: store i32 [[SUB58]], i32* [[DOTCAPTURE_EXPR_55]], align 4 -// CHECK19-NEXT: [[TMP171:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_55]], align 4 -// CHECK19-NEXT: [[ADD59:%.*]] = add nsw i32 [[TMP171]], 1 -// CHECK19-NEXT: [[TMP172:%.*]] = zext i32 [[ADD59]] to i64 -// CHECK19-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP172]]) -// CHECK19-NEXT: [[TMP173:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155.region_id, i32 4, i8** [[TMP166]], i8** [[TMP167]], i64* [[TMP168]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK19-NEXT: [[TMP174:%.*]] = icmp ne i32 [[TMP173]], 0 -// CHECK19-NEXT: br i1 [[TMP174]], label [[OMP_OFFLOAD_FAILED60:%.*]], label [[OMP_OFFLOAD_CONT61:%.*]] -// CHECK19: omp_offload.failed60: -// CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155(i32 [[TMP137]], i32 [[TMP0]], i32* [[VLA]], i32 [[TMP139]]) #[[ATTR3]] -// CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT61]] -// CHECK19: omp_offload.cont61: -// CHECK19-NEXT: [[TMP175:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 -// CHECK19-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef 
[[TMP175]]) +// CHECK19-NEXT: store i32 [[TMP122]], i32* [[TMP143]], align 4 +// CHECK19-NEXT: [[TMP144:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS48]], i32 0, i32 3 +// CHECK19-NEXT: store i8* null, i8** [[TMP144]], align 4 +// CHECK19-NEXT: [[TMP145:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS46]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP146:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS47]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP147:%.*]] = load i32, i32* [[N]], align 4 +// CHECK19-NEXT: store i32 [[TMP147]], i32* [[DOTCAPTURE_EXPR_50]], align 4 +// CHECK19-NEXT: [[TMP148:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_50]], align 4 +// CHECK19-NEXT: [[SUB52:%.*]] = sub nsw i32 [[TMP148]], 0 +// CHECK19-NEXT: [[DIV53:%.*]] = sdiv i32 [[SUB52]], 1 +// CHECK19-NEXT: [[SUB54:%.*]] = sub nsw i32 [[DIV53]], 1 +// CHECK19-NEXT: store i32 [[SUB54]], i32* [[DOTCAPTURE_EXPR_51]], align 4 +// CHECK19-NEXT: [[TMP149:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_51]], align 4 +// CHECK19-NEXT: [[ADD55:%.*]] = add nsw i32 [[TMP149]], 1 +// CHECK19-NEXT: [[TMP150:%.*]] = zext i32 [[ADD55]] to i64 +// CHECK19-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP150]]) +// CHECK19-NEXT: [[TMP151:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155.region_id, i32 4, i8** [[TMP145]], i8** [[TMP146]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.16, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK19-NEXT: [[TMP152:%.*]] = icmp ne i32 [[TMP151]], 0 +// CHECK19-NEXT: br i1 [[TMP152]], label [[OMP_OFFLOAD_FAILED56:%.*]], label [[OMP_OFFLOAD_CONT57:%.*]] +// CHECK19: omp_offload.failed56: +// CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155(i32 [[TMP120]], i32 [[TMP0]], i32* [[VLA]], i32 [[TMP122]]) #[[ATTR3]] +// CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT57]] +// CHECK19: omp_offload.cont57: +// CHECK19-NEXT: [[TMP153:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 +// CHECK19-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP153]]) // CHECK19-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK19-NEXT: [[TMP176:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK19-NEXT: call void @llvm.stackrestore(i8* [[TMP176]]) -// CHECK19-NEXT: [[TMP177:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK19-NEXT: ret i32 [[TMP177]] +// CHECK19-NEXT: [[TMP154:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK19-NEXT: call void @llvm.stackrestore(i8* [[TMP154]]) +// CHECK19-NEXT: [[TMP155:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK19-NEXT: ret i32 [[TMP155]] // // // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139 @@ -25654,11 +25381,11 @@ // CHECK19-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK19-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK19-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP5]]) +// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP5]]) // CHECK19-NEXT: ret void // // -// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..5 +// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..6 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -25739,7 +25466,7 @@ // CHECK19-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4, !llvm.access.group !29 // CHECK19-NEXT: store i32 [[TMP20]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !29 // CHECK19-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !29 -// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*, i32)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32 [[TMP16]], i32 [[TMP17]], i32 [[TMP19]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP21]]), !llvm.access.group !29 +// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*, i32)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i32 [[TMP16]], i32 [[TMP17]], i32 [[TMP19]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP21]]), !llvm.access.group !29 // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK19: omp.inner.for.inc: // CHECK19-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29 @@ -25793,7 +25520,7 @@ // CHECK19-NEXT: ret void // // -// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..6 +// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..7 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32 noundef [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -25923,11 +25650,11 @@ // CHECK19-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4 // CHECK19-NEXT: store i32 [[TMP2]], i32* [[N_CASTED]], align 4 // CHECK19-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4 -// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*)* @.omp_outlined..8 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]]) +// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]]) // CHECK19-NEXT: ret void // // -// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..8 +// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..10 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -26000,7 +25727,7 @@ // CHECK19-NEXT: [[TMP17:%.*]] = load i32, i32* [[N_ADDR]], align 4, !llvm.access.group !35 // CHECK19-NEXT: store i32 [[TMP17]], i32* [[N_CASTED]], align 4, !llvm.access.group !35 // CHECK19-NEXT: [[TMP18:%.*]] = load i32, i32* [[N_CASTED]], align 4, !llvm.access.group !35 -// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), i32 [[TMP15]], i32 [[TMP16]], i32 [[TMP18]], i32 [[TMP0]], i32* [[TMP1]]), !llvm.access.group !35 +// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP15]], i32 [[TMP16]], i32 [[TMP18]], i32 [[TMP0]], i32* [[TMP1]]), !llvm.access.group !35 // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK19: omp.inner.for.inc: // CHECK19-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35 @@ -26031,7 +25758,7 @@ // CHECK19-NEXT: ret void // // -// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..9 +// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..11 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32 noundef [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -26161,11 +25888,11 @@ // CHECK19-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK19-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK19-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP5]]) +// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP5]]) // CHECK19-NEXT: ret void // // -// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..14 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -26244,7 +25971,7 @@ // CHECK19-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4, !llvm.access.group !41 // CHECK19-NEXT: store i32 [[TMP19]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !41 // CHECK19-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !41 -// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*, i32)* @.omp_outlined..12 to void (i32*, i32*, ...)*), i32 [[TMP15]], i32 [[TMP16]], i32 [[TMP18]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP20]]), !llvm.access.group !41 +// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*, i32)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i32 [[TMP15]], i32 [[TMP16]], i32 [[TMP18]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP20]]), !llvm.access.group !41 // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK19: omp.inner.for.inc: // CHECK19-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !41 @@ -26275,7 +26002,7 @@ // CHECK19-NEXT: ret void // // -// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..12 +// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..15 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32 noundef [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -26430,7 +26157,7 @@ // CHECK19-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK19-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK19-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK19-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l112.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK19-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* 
@.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l112.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.20, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.21, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK19-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK19-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK19: omp_offload.failed: @@ -26448,7 +26175,7 @@ // CHECK19-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0 // CHECK19-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0 // CHECK19-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK19-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.19, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.20, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK19-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.24, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.25, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK19-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 // CHECK19-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]] // CHECK19: omp_offload.failed5: @@ -26479,7 +26206,7 @@ // CHECK19-NEXT: [[TMP31:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0 // CHECK19-NEXT: [[TMP32:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0 // CHECK19-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK19-NEXT: [[TMP33:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l120.region_id, i32 2, i8** [[TMP31]], i8** [[TMP32]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.23, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.24, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK19-NEXT: [[TMP33:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l120.region_id, i32 2, i8** [[TMP31]], i8** [[TMP32]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.28, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.29, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK19-NEXT: [[TMP34:%.*]] = icmp ne i32 [[TMP33]], 0 // CHECK19-NEXT: br i1 [[TMP34]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]] // CHECK19: omp_offload.failed11: @@ -26497,7 +26224,7 @@ // CHECK19-NEXT: [[TMP40:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 0 // CHECK19-NEXT: 
[[TMP41:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 0 // CHECK19-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK19-NEXT: [[TMP42:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l124.region_id, i32 1, i8** [[TMP40]], i8** [[TMP41]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.27, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.28, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK19-NEXT: [[TMP42:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l124.region_id, i32 1, i8** [[TMP40]], i8** [[TMP41]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.32, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.33, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK19-NEXT: [[TMP43:%.*]] = icmp ne i32 [[TMP42]], 0 // CHECK19-NEXT: br i1 [[TMP43]], label [[OMP_OFFLOAD_FAILED17:%.*]], label [[OMP_OFFLOAD_CONT18:%.*]] // CHECK19: omp_offload.failed17: @@ -26528,7 +26255,7 @@ // CHECK19-NEXT: [[TMP57:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0 // CHECK19-NEXT: [[TMP58:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0 // CHECK19-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK19-NEXT: [[TMP59:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l128.region_id, i32 2, i8** [[TMP57]], i8** [[TMP58]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.31, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.32, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK19-NEXT: [[TMP59:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l128.region_id, i32 2, i8** [[TMP57]], i8** [[TMP58]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.36, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.37, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK19-NEXT: [[TMP60:%.*]] = icmp ne i32 [[TMP59]], 0 // CHECK19-NEXT: br i1 [[TMP60]], label [[OMP_OFFLOAD_FAILED25:%.*]], label [[OMP_OFFLOAD_CONT26:%.*]] // CHECK19: omp_offload.failed25: @@ -26544,11 +26271,11 @@ // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK19-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 // CHECK19-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 -// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK19-NEXT: ret void // // -// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..14 +// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..18 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -26594,7 +26321,7 @@ // CHECK19: omp.inner.for.body: // CHECK19-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !47 // CHECK19-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !47 -// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]), !llvm.access.group !47 +// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..19 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]), !llvm.access.group !47 // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK19: omp.inner.for.inc: // CHECK19-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47 @@ -26616,7 +26343,7 @@ // CHECK19-NEXT: ret void // // -// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..15 +// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..19 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -26703,11 +26430,11 @@ // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK19-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 // CHECK19-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 -// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..17 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..22 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK19-NEXT: ret void // // -// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..17 +// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..22 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -26753,7 +26480,7 @@ // CHECK19: omp.inner.for.body: // CHECK19-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !53 // CHECK19-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !53 -// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]), !llvm.access.group !53 +// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..23 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]), !llvm.access.group !53 // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK19: omp.inner.for.inc: // CHECK19-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !53 @@ -26775,7 +26502,7 @@ // CHECK19-NEXT: ret void // // -// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..18 +// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..23 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -26868,11 +26595,11 @@ // CHECK19-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK19-NEXT: store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK19-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..21 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP2]]) +// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..26 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP2]]) // CHECK19-NEXT: ret void // // -// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..21 +// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..26 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -26924,7 +26651,7 @@ // CHECK19-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4, !llvm.access.group !59 // CHECK19-NEXT: store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !59 // CHECK19-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !59 -// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..22 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]]), !llvm.access.group !59 +// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..27 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]]), !llvm.access.group !59 // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK19: omp.inner.for.inc: // CHECK19-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !59 @@ -26946,7 +26673,7 @@ // CHECK19-NEXT: ret void // // -// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..22 +// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..27 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -27055,11 +26782,11 @@ // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK19-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 // CHECK19-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 -// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..25 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..30 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK19-NEXT: ret void // // -// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..25 +// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..30 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -27105,7 +26832,7 @@ // CHECK19: omp.inner.for.body: // CHECK19-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !65 // CHECK19-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !65 -// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..26 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]), !llvm.access.group !65 +// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..31 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]), !llvm.access.group !65 // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK19: omp.inner.for.inc: // CHECK19-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !65 @@ -27127,7 +26854,7 @@ // CHECK19-NEXT: ret void // // -// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..26 +// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..31 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -27218,11 +26945,11 @@ // CHECK19-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK19-NEXT: store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK19-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..29 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP2]]) +// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..34 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP2]]) // CHECK19-NEXT: ret void // // -// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..29 +// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..34 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -27274,7 +27001,7 @@ // CHECK19-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4, !llvm.access.group !71 // CHECK19-NEXT: store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !71 // CHECK19-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !71 -// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..30 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]]), !llvm.access.group !71 +// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..35 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]]), !llvm.access.group !71 // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK19: omp.inner.for.inc: // CHECK19-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !71 @@ -27296,7 +27023,7 @@ // CHECK19-NEXT: ret void // // -// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..30 +// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..35 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -27399,7 +27126,6 @@ // CHECK20-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK20-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK20-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4 -// CHECK20-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 4 // CHECK20-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK20-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK20-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -27407,38 +27133,34 @@ // CHECK20-NEXT: [[DOTOFFLOAD_BASEPTRS4:%.*]] = alloca [3 x i8*], align 4 // CHECK20-NEXT: [[DOTOFFLOAD_PTRS5:%.*]] = alloca [3 x i8*], align 4 // CHECK20-NEXT: [[DOTOFFLOAD_MAPPERS6:%.*]] = alloca [3 x i8*], align 4 -// CHECK20-NEXT: [[DOTOFFLOAD_SIZES7:%.*]] = alloca [3 x i64], align 4 -// CHECK20-NEXT: [[_TMP8:%.*]] = alloca i32, align 4 +// CHECK20-NEXT: [[_TMP7:%.*]] = alloca i32, align 4 +// CHECK20-NEXT: [[DOTCAPTURE_EXPR_8:%.*]] = alloca i32, align 4 // CHECK20-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4 -// CHECK20-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, 
align 4 -// CHECK20-NEXT: [[DOTCAPTURE_EXPR_17:%.*]] = alloca i32, align 4 -// CHECK20-NEXT: [[N_CASTED18:%.*]] = alloca i32, align 4 +// CHECK20-NEXT: [[DOTCAPTURE_EXPR_16:%.*]] = alloca i32, align 4 +// CHECK20-NEXT: [[N_CASTED17:%.*]] = alloca i32, align 4 // CHECK20-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4 -// CHECK20-NEXT: [[DOTOFFLOAD_BASEPTRS19:%.*]] = alloca [4 x i8*], align 4 -// CHECK20-NEXT: [[DOTOFFLOAD_PTRS20:%.*]] = alloca [4 x i8*], align 4 -// CHECK20-NEXT: [[DOTOFFLOAD_MAPPERS21:%.*]] = alloca [4 x i8*], align 4 -// CHECK20-NEXT: [[DOTOFFLOAD_SIZES22:%.*]] = alloca [4 x i64], align 4 -// CHECK20-NEXT: [[_TMP23:%.*]] = alloca i32, align 4 -// CHECK20-NEXT: [[DOTCAPTURE_EXPR_24:%.*]] = alloca i32, align 4 -// CHECK20-NEXT: [[DOTCAPTURE_EXPR_25:%.*]] = alloca i32, align 4 -// CHECK20-NEXT: [[N_CASTED32:%.*]] = alloca i32, align 4 -// CHECK20-NEXT: [[DOTOFFLOAD_BASEPTRS33:%.*]] = alloca [3 x i8*], align 4 -// CHECK20-NEXT: [[DOTOFFLOAD_PTRS34:%.*]] = alloca [3 x i8*], align 4 -// CHECK20-NEXT: [[DOTOFFLOAD_MAPPERS35:%.*]] = alloca [3 x i8*], align 4 -// CHECK20-NEXT: [[DOTOFFLOAD_SIZES36:%.*]] = alloca [3 x i64], align 4 -// CHECK20-NEXT: [[_TMP37:%.*]] = alloca i32, align 4 -// CHECK20-NEXT: [[DOTCAPTURE_EXPR_38:%.*]] = alloca i32, align 4 -// CHECK20-NEXT: [[DOTCAPTURE_EXPR_39:%.*]] = alloca i32, align 4 -// CHECK20-NEXT: [[DOTCAPTURE_EXPR_46:%.*]] = alloca i32, align 4 -// CHECK20-NEXT: [[N_CASTED47:%.*]] = alloca i32, align 4 -// CHECK20-NEXT: [[DOTCAPTURE_EXPR__CASTED48:%.*]] = alloca i32, align 4 -// CHECK20-NEXT: [[DOTOFFLOAD_BASEPTRS49:%.*]] = alloca [4 x i8*], align 4 -// CHECK20-NEXT: [[DOTOFFLOAD_PTRS50:%.*]] = alloca [4 x i8*], align 4 -// CHECK20-NEXT: [[DOTOFFLOAD_MAPPERS51:%.*]] = alloca [4 x i8*], align 4 -// CHECK20-NEXT: [[DOTOFFLOAD_SIZES52:%.*]] = alloca [4 x i64], align 4 -// CHECK20-NEXT: [[_TMP53:%.*]] = alloca i32, align 4 -// CHECK20-NEXT: [[DOTCAPTURE_EXPR_54:%.*]] = alloca i32, align 4 -// CHECK20-NEXT: [[DOTCAPTURE_EXPR_55:%.*]] = alloca i32, align 4 +// CHECK20-NEXT: [[DOTOFFLOAD_BASEPTRS18:%.*]] = alloca [4 x i8*], align 4 +// CHECK20-NEXT: [[DOTOFFLOAD_PTRS19:%.*]] = alloca [4 x i8*], align 4 +// CHECK20-NEXT: [[DOTOFFLOAD_MAPPERS20:%.*]] = alloca [4 x i8*], align 4 +// CHECK20-NEXT: [[_TMP21:%.*]] = alloca i32, align 4 +// CHECK20-NEXT: [[DOTCAPTURE_EXPR_22:%.*]] = alloca i32, align 4 +// CHECK20-NEXT: [[DOTCAPTURE_EXPR_23:%.*]] = alloca i32, align 4 +// CHECK20-NEXT: [[N_CASTED30:%.*]] = alloca i32, align 4 +// CHECK20-NEXT: [[DOTOFFLOAD_BASEPTRS31:%.*]] = alloca [3 x i8*], align 4 +// CHECK20-NEXT: [[DOTOFFLOAD_PTRS32:%.*]] = alloca [3 x i8*], align 4 +// CHECK20-NEXT: [[DOTOFFLOAD_MAPPERS33:%.*]] = alloca [3 x i8*], align 4 +// CHECK20-NEXT: [[_TMP34:%.*]] = alloca i32, align 4 +// CHECK20-NEXT: [[DOTCAPTURE_EXPR_35:%.*]] = alloca i32, align 4 +// CHECK20-NEXT: [[DOTCAPTURE_EXPR_36:%.*]] = alloca i32, align 4 +// CHECK20-NEXT: [[DOTCAPTURE_EXPR_43:%.*]] = alloca i32, align 4 +// CHECK20-NEXT: [[N_CASTED44:%.*]] = alloca i32, align 4 +// CHECK20-NEXT: [[DOTCAPTURE_EXPR__CASTED45:%.*]] = alloca i32, align 4 +// CHECK20-NEXT: [[DOTOFFLOAD_BASEPTRS46:%.*]] = alloca [4 x i8*], align 4 +// CHECK20-NEXT: [[DOTOFFLOAD_PTRS47:%.*]] = alloca [4 x i8*], align 4 +// CHECK20-NEXT: [[DOTOFFLOAD_MAPPERS48:%.*]] = alloca [4 x i8*], align 4 +// CHECK20-NEXT: [[_TMP49:%.*]] = alloca i32, align 4 +// CHECK20-NEXT: [[DOTCAPTURE_EXPR_50:%.*]] = alloca i32, align 4 +// CHECK20-NEXT: [[DOTCAPTURE_EXPR_51:%.*]] = alloca i32, align 4 // 
CHECK20-NEXT: store i32 0, i32* [[RETVAL]], align 4 // CHECK20-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 // CHECK20-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 4 @@ -27460,312 +27182,278 @@ // CHECK20-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK20-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i32* // CHECK20-NEXT: store i32 [[TMP3]], i32* [[TMP9]], align 4 -// CHECK20-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK20-NEXT: store i64 4, i64* [[TMP10]], align 4 -// CHECK20-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK20-NEXT: store i8* null, i8** [[TMP11]], align 4 -// CHECK20-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK20-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32* -// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP13]], align 4 -// CHECK20-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK20-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32* -// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP15]], align 4 -// CHECK20-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK20-NEXT: store i64 4, i64* [[TMP16]], align 4 -// CHECK20-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK20-NEXT: store i8* null, i8** [[TMP17]], align 4 -// CHECK20-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK20-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK20-NEXT: store i8* null, i8** [[TMP10]], align 4 +// CHECK20-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK20-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i32* +// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP12]], align 4 +// CHECK20-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK20-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i32* +// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP14]], align 4 +// CHECK20-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK20-NEXT: store i8* null, i8** [[TMP15]], align 4 +// CHECK20-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK20-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** +// CHECK20-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 4 +// CHECK20-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK20-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** // CHECK20-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 4 -// CHECK20-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK20-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** -// CHECK20-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 4 -// CHECK20-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK20-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 4 -// CHECK20-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* 
[[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK20-NEXT: store i8* null, i8** [[TMP23]], align 4 -// CHECK20-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4 -// CHECK20-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK20-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK20-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 +// CHECK20-NEXT: store i64 [[TMP5]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 2), align 4 +// CHECK20-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK20-NEXT: store i8* null, i8** [[TMP20]], align 4 +// CHECK20-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP23:%.*]] = load i32, i32* [[N]], align 4 +// CHECK20-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK20-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK20-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK20-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK20-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK20-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK20-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK20-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1 -// CHECK20-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 -// CHECK20-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP30]]) -// CHECK20-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK20-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 -// CHECK20-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK20-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK20-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], 1 +// CHECK20-NEXT: [[TMP26:%.*]] = zext i32 [[ADD]] to i64 +// CHECK20-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP26]]) +// CHECK20-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK20-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK20-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK20: omp_offload.failed: // CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139(i32 [[TMP3]], i32 
[[TMP0]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK20-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK20: omp_offload.cont: -// CHECK20-NEXT: [[TMP33:%.*]] = load i32, i32* [[N]], align 4 -// CHECK20-NEXT: store i32 [[TMP33]], i32* [[N_CASTED3]], align 4 -// CHECK20-NEXT: [[TMP34:%.*]] = load i32, i32* [[N_CASTED3]], align 4 -// CHECK20-NEXT: [[TMP35:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK20-NEXT: [[TMP36:%.*]] = sext i32 [[TMP35]] to i64 -// CHECK20-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i32* -// CHECK20-NEXT: store i32 [[TMP34]], i32* [[TMP38]], align 4 -// CHECK20-NEXT: [[TMP39:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i32* -// CHECK20-NEXT: store i32 [[TMP34]], i32* [[TMP40]], align 4 -// CHECK20-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 0 -// CHECK20-NEXT: store i64 4, i64* [[TMP41]], align 4 -// CHECK20-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP29:%.*]] = load i32, i32* [[N]], align 4 +// CHECK20-NEXT: store i32 [[TMP29]], i32* [[N_CASTED3]], align 4 +// CHECK20-NEXT: [[TMP30:%.*]] = load i32, i32* [[N_CASTED3]], align 4 +// CHECK20-NEXT: [[TMP31:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK20-NEXT: [[TMP32:%.*]] = sext i32 [[TMP31]] to i64 +// CHECK20-NEXT: [[TMP33:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i32* +// CHECK20-NEXT: store i32 [[TMP30]], i32* [[TMP34]], align 4 +// CHECK20-NEXT: [[TMP35:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP36:%.*]] = bitcast i8** [[TMP35]] to i32* +// CHECK20-NEXT: store i32 [[TMP30]], i32* [[TMP36]], align 4 +// CHECK20-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0 +// CHECK20-NEXT: store i8* null, i8** [[TMP37]], align 4 +// CHECK20-NEXT: [[TMP38:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1 +// CHECK20-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i32* +// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP39]], align 4 +// CHECK20-NEXT: [[TMP40:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 1 +// CHECK20-NEXT: [[TMP41:%.*]] = bitcast i8** [[TMP40]] to i32* +// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP41]], align 4 +// CHECK20-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1 // CHECK20-NEXT: store i8* null, i8** [[TMP42]], align 4 -// CHECK20-NEXT: [[TMP43:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1 -// CHECK20-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32* -// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP44]], align 4 -// CHECK20-NEXT: [[TMP45:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 1 -// CHECK20-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32* -// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP46]], align 4 -// CHECK20-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 1 -// CHECK20-NEXT: store i64 4, i64* [[TMP47]], align 4 -// CHECK20-NEXT: [[TMP48:%.*]] = 
getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1 -// CHECK20-NEXT: store i8* null, i8** [[TMP48]], align 4 -// CHECK20-NEXT: [[TMP49:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2 -// CHECK20-NEXT: [[TMP50:%.*]] = bitcast i8** [[TMP49]] to i32** -// CHECK20-NEXT: store i32* [[VLA]], i32** [[TMP50]], align 4 -// CHECK20-NEXT: [[TMP51:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 2 -// CHECK20-NEXT: [[TMP52:%.*]] = bitcast i8** [[TMP51]] to i32** -// CHECK20-NEXT: store i32* [[VLA]], i32** [[TMP52]], align 4 -// CHECK20-NEXT: [[TMP53:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 2 -// CHECK20-NEXT: store i64 [[TMP36]], i64* [[TMP53]], align 4 -// CHECK20-NEXT: [[TMP54:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 2 -// CHECK20-NEXT: store i8* null, i8** [[TMP54]], align 4 -// CHECK20-NEXT: [[TMP55:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP56:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP57:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP58:%.*]] = load i32, i32* [[N]], align 4 -// CHECK20-NEXT: store i32 [[TMP58]], i32* [[DOTCAPTURE_EXPR_9]], align 4 -// CHECK20-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 -// CHECK20-NEXT: [[SUB11:%.*]] = sub nsw i32 [[TMP59]], 0 -// CHECK20-NEXT: [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1 -// CHECK20-NEXT: [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1 -// CHECK20-NEXT: store i32 [[SUB13]], i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK20-NEXT: [[TMP60:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK20-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP60]], 1 -// CHECK20-NEXT: [[TMP61:%.*]] = zext i32 [[ADD14]] to i64 -// CHECK20-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP61]]) -// CHECK20-NEXT: [[TMP62:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143.region_id, i32 3, i8** [[TMP55]], i8** [[TMP56]], i64* [[TMP57]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK20-NEXT: [[TMP63:%.*]] = icmp ne i32 [[TMP62]], 0 -// CHECK20-NEXT: br i1 [[TMP63]], label [[OMP_OFFLOAD_FAILED15:%.*]], label [[OMP_OFFLOAD_CONT16:%.*]] -// CHECK20: omp_offload.failed15: -// CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143(i32 [[TMP34]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] -// CHECK20-NEXT: br label [[OMP_OFFLOAD_CONT16]] -// CHECK20: omp_offload.cont16: -// CHECK20-NEXT: [[TMP64:%.*]] = load i32, i32* [[M]], align 4 -// CHECK20-NEXT: store i32 [[TMP64]], i32* [[DOTCAPTURE_EXPR_17]], align 4 -// CHECK20-NEXT: [[TMP65:%.*]] = load i32, i32* [[N]], align 4 -// CHECK20-NEXT: store i32 [[TMP65]], i32* [[N_CASTED18]], align 4 -// CHECK20-NEXT: [[TMP66:%.*]] = load i32, i32* [[N_CASTED18]], align 4 -// CHECK20-NEXT: [[TMP67:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_17]], align 4 -// CHECK20-NEXT: store i32 [[TMP67]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK20-NEXT: [[TMP68:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK20-NEXT: [[TMP69:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK20-NEXT: 
[[TMP70:%.*]] = sext i32 [[TMP69]] to i64 -// CHECK20-NEXT: [[TMP71:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP72:%.*]] = bitcast i8** [[TMP71]] to i32* -// CHECK20-NEXT: store i32 [[TMP66]], i32* [[TMP72]], align 4 -// CHECK20-NEXT: [[TMP73:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP74:%.*]] = bitcast i8** [[TMP73]] to i32* -// CHECK20-NEXT: store i32 [[TMP66]], i32* [[TMP74]], align 4 -// CHECK20-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 0 -// CHECK20-NEXT: store i64 4, i64* [[TMP75]], align 4 -// CHECK20-NEXT: [[TMP76:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 0 -// CHECK20-NEXT: store i8* null, i8** [[TMP76]], align 4 -// CHECK20-NEXT: [[TMP77:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 1 -// CHECK20-NEXT: [[TMP78:%.*]] = bitcast i8** [[TMP77]] to i32* -// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP78]], align 4 -// CHECK20-NEXT: [[TMP79:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 1 -// CHECK20-NEXT: [[TMP80:%.*]] = bitcast i8** [[TMP79]] to i32* -// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP80]], align 4 -// CHECK20-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 1 -// CHECK20-NEXT: store i64 4, i64* [[TMP81]], align 4 -// CHECK20-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 1 +// CHECK20-NEXT: [[TMP43:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2 +// CHECK20-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32** +// CHECK20-NEXT: store i32* [[VLA]], i32** [[TMP44]], align 4 +// CHECK20-NEXT: [[TMP45:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 2 +// CHECK20-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32** +// CHECK20-NEXT: store i32* [[VLA]], i32** [[TMP46]], align 4 +// CHECK20-NEXT: store i64 [[TMP32]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 2), align 4 +// CHECK20-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 2 +// CHECK20-NEXT: store i8* null, i8** [[TMP47]], align 4 +// CHECK20-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP49:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP50:%.*]] = load i32, i32* [[N]], align 4 +// CHECK20-NEXT: store i32 [[TMP50]], i32* [[DOTCAPTURE_EXPR_8]], align 4 +// CHECK20-NEXT: [[TMP51:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_8]], align 4 +// CHECK20-NEXT: [[SUB10:%.*]] = sub nsw i32 [[TMP51]], 0 +// CHECK20-NEXT: [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1 +// CHECK20-NEXT: [[SUB12:%.*]] = sub nsw i32 [[DIV11]], 1 +// CHECK20-NEXT: store i32 [[SUB12]], i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK20-NEXT: [[TMP52:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK20-NEXT: [[ADD13:%.*]] = add nsw i32 [[TMP52]], 1 +// CHECK20-NEXT: [[TMP53:%.*]] = zext i32 [[ADD13]] to i64 +// CHECK20-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP53]]) +// CHECK20-NEXT: [[TMP54:%.*]] = call i32 
@__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143.region_id, i32 3, i8** [[TMP48]], i8** [[TMP49]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK20-NEXT: [[TMP55:%.*]] = icmp ne i32 [[TMP54]], 0 +// CHECK20-NEXT: br i1 [[TMP55]], label [[OMP_OFFLOAD_FAILED14:%.*]], label [[OMP_OFFLOAD_CONT15:%.*]] +// CHECK20: omp_offload.failed14: +// CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l143(i32 [[TMP30]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] +// CHECK20-NEXT: br label [[OMP_OFFLOAD_CONT15]] +// CHECK20: omp_offload.cont15: +// CHECK20-NEXT: [[TMP56:%.*]] = load i32, i32* [[M]], align 4 +// CHECK20-NEXT: store i32 [[TMP56]], i32* [[DOTCAPTURE_EXPR_16]], align 4 +// CHECK20-NEXT: [[TMP57:%.*]] = load i32, i32* [[N]], align 4 +// CHECK20-NEXT: store i32 [[TMP57]], i32* [[N_CASTED17]], align 4 +// CHECK20-NEXT: [[TMP58:%.*]] = load i32, i32* [[N_CASTED17]], align 4 +// CHECK20-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_16]], align 4 +// CHECK20-NEXT: store i32 [[TMP59]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 +// CHECK20-NEXT: [[TMP60:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 +// CHECK20-NEXT: [[TMP61:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK20-NEXT: [[TMP62:%.*]] = sext i32 [[TMP61]] to i64 +// CHECK20-NEXT: [[TMP63:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP64:%.*]] = bitcast i8** [[TMP63]] to i32* +// CHECK20-NEXT: store i32 [[TMP58]], i32* [[TMP64]], align 4 +// CHECK20-NEXT: [[TMP65:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP66:%.*]] = bitcast i8** [[TMP65]] to i32* +// CHECK20-NEXT: store i32 [[TMP58]], i32* [[TMP66]], align 4 +// CHECK20-NEXT: [[TMP67:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 0 +// CHECK20-NEXT: store i8* null, i8** [[TMP67]], align 4 +// CHECK20-NEXT: [[TMP68:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 1 +// CHECK20-NEXT: [[TMP69:%.*]] = bitcast i8** [[TMP68]] to i32* +// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP69]], align 4 +// CHECK20-NEXT: [[TMP70:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 1 +// CHECK20-NEXT: [[TMP71:%.*]] = bitcast i8** [[TMP70]] to i32* +// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP71]], align 4 +// CHECK20-NEXT: [[TMP72:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 1 +// CHECK20-NEXT: store i8* null, i8** [[TMP72]], align 4 +// CHECK20-NEXT: [[TMP73:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 2 +// CHECK20-NEXT: [[TMP74:%.*]] = bitcast i8** [[TMP73]] to i32** +// CHECK20-NEXT: store i32* [[VLA]], i32** [[TMP74]], align 4 +// CHECK20-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 2 +// CHECK20-NEXT: [[TMP76:%.*]] = bitcast i8** [[TMP75]] to i32** +// CHECK20-NEXT: store i32* [[VLA]], i32** [[TMP76]], align 4 +// CHECK20-NEXT: store i64 [[TMP62]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 2), align 4 +// CHECK20-NEXT: [[TMP77:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* 
[[DOTOFFLOAD_MAPPERS20]], i32 0, i32 2 +// CHECK20-NEXT: store i8* null, i8** [[TMP77]], align 4 +// CHECK20-NEXT: [[TMP78:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 3 +// CHECK20-NEXT: [[TMP79:%.*]] = bitcast i8** [[TMP78]] to i32* +// CHECK20-NEXT: store i32 [[TMP60]], i32* [[TMP79]], align 4 +// CHECK20-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 3 +// CHECK20-NEXT: [[TMP81:%.*]] = bitcast i8** [[TMP80]] to i32* +// CHECK20-NEXT: store i32 [[TMP60]], i32* [[TMP81]], align 4 +// CHECK20-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 3 // CHECK20-NEXT: store i8* null, i8** [[TMP82]], align 4 -// CHECK20-NEXT: [[TMP83:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 2 -// CHECK20-NEXT: [[TMP84:%.*]] = bitcast i8** [[TMP83]] to i32** -// CHECK20-NEXT: store i32* [[VLA]], i32** [[TMP84]], align 4 -// CHECK20-NEXT: [[TMP85:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 2 -// CHECK20-NEXT: [[TMP86:%.*]] = bitcast i8** [[TMP85]] to i32** -// CHECK20-NEXT: store i32* [[VLA]], i32** [[TMP86]], align 4 -// CHECK20-NEXT: [[TMP87:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 2 -// CHECK20-NEXT: store i64 [[TMP70]], i64* [[TMP87]], align 4 -// CHECK20-NEXT: [[TMP88:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 2 -// CHECK20-NEXT: store i8* null, i8** [[TMP88]], align 4 -// CHECK20-NEXT: [[TMP89:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 3 -// CHECK20-NEXT: [[TMP90:%.*]] = bitcast i8** [[TMP89]] to i32* -// CHECK20-NEXT: store i32 [[TMP68]], i32* [[TMP90]], align 4 -// CHECK20-NEXT: [[TMP91:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 3 -// CHECK20-NEXT: [[TMP92:%.*]] = bitcast i8** [[TMP91]] to i32* -// CHECK20-NEXT: store i32 [[TMP68]], i32* [[TMP92]], align 4 -// CHECK20-NEXT: [[TMP93:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 3 -// CHECK20-NEXT: store i64 4, i64* [[TMP93]], align 4 -// CHECK20-NEXT: [[TMP94:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 3 -// CHECK20-NEXT: store i8* null, i8** [[TMP94]], align 4 -// CHECK20-NEXT: [[TMP95:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP96:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP97:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP98:%.*]] = load i32, i32* [[N]], align 4 -// CHECK20-NEXT: store i32 [[TMP98]], i32* [[DOTCAPTURE_EXPR_24]], align 4 -// CHECK20-NEXT: [[TMP99:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_24]], align 4 -// CHECK20-NEXT: [[SUB26:%.*]] = sub nsw i32 [[TMP99]], 0 -// CHECK20-NEXT: [[DIV27:%.*]] = sdiv i32 [[SUB26]], 1 -// CHECK20-NEXT: [[SUB28:%.*]] = sub nsw i32 [[DIV27]], 1 -// CHECK20-NEXT: store i32 [[SUB28]], i32* [[DOTCAPTURE_EXPR_25]], align 4 -// CHECK20-NEXT: [[TMP100:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_25]], align 4 -// CHECK20-NEXT: [[ADD29:%.*]] = add nsw i32 [[TMP100]], 1 -// CHECK20-NEXT: [[TMP101:%.*]] = zext i32 [[ADD29]] to i64 -// CHECK20-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* 
@[[GLOB3]], i64 -1, i64 [[TMP101]]) -// CHECK20-NEXT: [[TMP102:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147.region_id, i32 4, i8** [[TMP95]], i8** [[TMP96]], i64* [[TMP97]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.7, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK20-NEXT: [[TMP103:%.*]] = icmp ne i32 [[TMP102]], 0 -// CHECK20-NEXT: br i1 [[TMP103]], label [[OMP_OFFLOAD_FAILED30:%.*]], label [[OMP_OFFLOAD_CONT31:%.*]] -// CHECK20: omp_offload.failed30: -// CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147(i32 [[TMP66]], i32 [[TMP0]], i32* [[VLA]], i32 [[TMP68]]) #[[ATTR3]] -// CHECK20-NEXT: br label [[OMP_OFFLOAD_CONT31]] -// CHECK20: omp_offload.cont31: -// CHECK20-NEXT: [[TMP104:%.*]] = load i32, i32* [[N]], align 4 -// CHECK20-NEXT: store i32 [[TMP104]], i32* [[N_CASTED32]], align 4 -// CHECK20-NEXT: [[TMP105:%.*]] = load i32, i32* [[N_CASTED32]], align 4 -// CHECK20-NEXT: [[TMP106:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK20-NEXT: [[TMP107:%.*]] = sext i32 [[TMP106]] to i64 -// CHECK20-NEXT: [[TMP108:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP109:%.*]] = bitcast i8** [[TMP108]] to i32* -// CHECK20-NEXT: store i32 [[TMP105]], i32* [[TMP109]], align 4 -// CHECK20-NEXT: [[TMP110:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP111:%.*]] = bitcast i8** [[TMP110]] to i32* -// CHECK20-NEXT: store i32 [[TMP105]], i32* [[TMP111]], align 4 -// CHECK20-NEXT: [[TMP112:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES36]], i32 0, i32 0 -// CHECK20-NEXT: store i64 4, i64* [[TMP112]], align 4 -// CHECK20-NEXT: [[TMP113:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS35]], i32 0, i32 0 -// CHECK20-NEXT: store i8* null, i8** [[TMP113]], align 4 -// CHECK20-NEXT: [[TMP114:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 1 -// CHECK20-NEXT: [[TMP115:%.*]] = bitcast i8** [[TMP114]] to i32* -// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP115]], align 4 -// CHECK20-NEXT: [[TMP116:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 1 -// CHECK20-NEXT: [[TMP117:%.*]] = bitcast i8** [[TMP116]] to i32* -// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP117]], align 4 -// CHECK20-NEXT: [[TMP118:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES36]], i32 0, i32 1 -// CHECK20-NEXT: store i64 4, i64* [[TMP118]], align 4 -// CHECK20-NEXT: [[TMP119:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS35]], i32 0, i32 1 -// CHECK20-NEXT: store i8* null, i8** [[TMP119]], align 4 -// CHECK20-NEXT: [[TMP120:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 2 -// CHECK20-NEXT: [[TMP121:%.*]] = bitcast i8** [[TMP120]] to i32** -// CHECK20-NEXT: store i32* [[VLA]], i32** [[TMP121]], align 4 -// CHECK20-NEXT: [[TMP122:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 2 -// CHECK20-NEXT: [[TMP123:%.*]] = bitcast i8** [[TMP122]] to i32** -// CHECK20-NEXT: store i32* [[VLA]], i32** [[TMP123]], align 4 -// CHECK20-NEXT: [[TMP124:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES36]], i32 0, i32 2 -// CHECK20-NEXT: store i64 [[TMP107]], i64* [[TMP124]], align 4 -// CHECK20-NEXT: 
[[TMP125:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS35]], i32 0, i32 2 -// CHECK20-NEXT: store i8* null, i8** [[TMP125]], align 4 -// CHECK20-NEXT: [[TMP126:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS33]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP127:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS34]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP128:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES36]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP129:%.*]] = load i32, i32* [[N]], align 4 -// CHECK20-NEXT: store i32 [[TMP129]], i32* [[DOTCAPTURE_EXPR_38]], align 4 -// CHECK20-NEXT: [[TMP130:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_38]], align 4 -// CHECK20-NEXT: [[SUB40:%.*]] = sub nsw i32 [[TMP130]], 0 -// CHECK20-NEXT: [[DIV41:%.*]] = sdiv i32 [[SUB40]], 1 -// CHECK20-NEXT: [[SUB42:%.*]] = sub nsw i32 [[DIV41]], 1 -// CHECK20-NEXT: store i32 [[SUB42]], i32* [[DOTCAPTURE_EXPR_39]], align 4 -// CHECK20-NEXT: [[TMP131:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_39]], align 4 -// CHECK20-NEXT: [[ADD43:%.*]] = add nsw i32 [[TMP131]], 1 -// CHECK20-NEXT: [[TMP132:%.*]] = zext i32 [[ADD43]] to i64 -// CHECK20-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP132]]) -// CHECK20-NEXT: [[TMP133:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151.region_id, i32 3, i8** [[TMP126]], i8** [[TMP127]], i64* [[TMP128]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK20-NEXT: [[TMP134:%.*]] = icmp ne i32 [[TMP133]], 0 -// CHECK20-NEXT: br i1 [[TMP134]], label [[OMP_OFFLOAD_FAILED44:%.*]], label [[OMP_OFFLOAD_CONT45:%.*]] -// CHECK20: omp_offload.failed44: -// CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151(i32 [[TMP105]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] -// CHECK20-NEXT: br label [[OMP_OFFLOAD_CONT45]] -// CHECK20: omp_offload.cont45: -// CHECK20-NEXT: [[TMP135:%.*]] = load i32, i32* [[M]], align 4 -// CHECK20-NEXT: store i32 [[TMP135]], i32* [[DOTCAPTURE_EXPR_46]], align 4 -// CHECK20-NEXT: [[TMP136:%.*]] = load i32, i32* [[N]], align 4 -// CHECK20-NEXT: store i32 [[TMP136]], i32* [[N_CASTED47]], align 4 -// CHECK20-NEXT: [[TMP137:%.*]] = load i32, i32* [[N_CASTED47]], align 4 -// CHECK20-NEXT: [[TMP138:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_46]], align 4 -// CHECK20-NEXT: store i32 [[TMP138]], i32* [[DOTCAPTURE_EXPR__CASTED48]], align 4 -// CHECK20-NEXT: [[TMP139:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED48]], align 4 -// CHECK20-NEXT: [[TMP140:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK20-NEXT: [[TMP141:%.*]] = sext i32 [[TMP140]] to i64 -// CHECK20-NEXT: [[TMP142:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS49]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP83:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP84:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP85:%.*]] = load i32, i32* [[N]], align 4 +// CHECK20-NEXT: store i32 [[TMP85]], i32* [[DOTCAPTURE_EXPR_22]], align 4 +// CHECK20-NEXT: [[TMP86:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_22]], align 4 +// CHECK20-NEXT: [[SUB24:%.*]] = sub nsw i32 [[TMP86]], 0 +// CHECK20-NEXT: [[DIV25:%.*]] = sdiv i32 [[SUB24]], 1 +// CHECK20-NEXT: [[SUB26:%.*]] = sub nsw i32 
[[DIV25]], 1 +// CHECK20-NEXT: store i32 [[SUB26]], i32* [[DOTCAPTURE_EXPR_23]], align 4 +// CHECK20-NEXT: [[TMP87:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_23]], align 4 +// CHECK20-NEXT: [[ADD27:%.*]] = add nsw i32 [[TMP87]], 1 +// CHECK20-NEXT: [[TMP88:%.*]] = zext i32 [[ADD27]] to i64 +// CHECK20-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP88]]) +// CHECK20-NEXT: [[TMP89:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147.region_id, i32 4, i8** [[TMP83]], i8** [[TMP84]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK20-NEXT: [[TMP90:%.*]] = icmp ne i32 [[TMP89]], 0 +// CHECK20-NEXT: br i1 [[TMP90]], label [[OMP_OFFLOAD_FAILED28:%.*]], label [[OMP_OFFLOAD_CONT29:%.*]] +// CHECK20: omp_offload.failed28: +// CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l147(i32 [[TMP58]], i32 [[TMP0]], i32* [[VLA]], i32 [[TMP60]]) #[[ATTR3]] +// CHECK20-NEXT: br label [[OMP_OFFLOAD_CONT29]] +// CHECK20: omp_offload.cont29: +// CHECK20-NEXT: [[TMP91:%.*]] = load i32, i32* [[N]], align 4 +// CHECK20-NEXT: store i32 [[TMP91]], i32* [[N_CASTED30]], align 4 +// CHECK20-NEXT: [[TMP92:%.*]] = load i32, i32* [[N_CASTED30]], align 4 +// CHECK20-NEXT: [[TMP93:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK20-NEXT: [[TMP94:%.*]] = sext i32 [[TMP93]] to i64 +// CHECK20-NEXT: [[TMP95:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS31]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP96:%.*]] = bitcast i8** [[TMP95]] to i32* +// CHECK20-NEXT: store i32 [[TMP92]], i32* [[TMP96]], align 4 +// CHECK20-NEXT: [[TMP97:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS32]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP98:%.*]] = bitcast i8** [[TMP97]] to i32* +// CHECK20-NEXT: store i32 [[TMP92]], i32* [[TMP98]], align 4 +// CHECK20-NEXT: [[TMP99:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS33]], i32 0, i32 0 +// CHECK20-NEXT: store i8* null, i8** [[TMP99]], align 4 +// CHECK20-NEXT: [[TMP100:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS31]], i32 0, i32 1 +// CHECK20-NEXT: [[TMP101:%.*]] = bitcast i8** [[TMP100]] to i32* +// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP101]], align 4 +// CHECK20-NEXT: [[TMP102:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS32]], i32 0, i32 1 +// CHECK20-NEXT: [[TMP103:%.*]] = bitcast i8** [[TMP102]] to i32* +// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP103]], align 4 +// CHECK20-NEXT: [[TMP104:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS33]], i32 0, i32 1 +// CHECK20-NEXT: store i8* null, i8** [[TMP104]], align 4 +// CHECK20-NEXT: [[TMP105:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS31]], i32 0, i32 2 +// CHECK20-NEXT: [[TMP106:%.*]] = bitcast i8** [[TMP105]] to i32** +// CHECK20-NEXT: store i32* [[VLA]], i32** [[TMP106]], align 4 +// CHECK20-NEXT: [[TMP107:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS32]], i32 0, i32 2 +// CHECK20-NEXT: [[TMP108:%.*]] = bitcast i8** [[TMP107]] to i32** +// CHECK20-NEXT: store i32* [[VLA]], i32** [[TMP108]], align 4 +// CHECK20-NEXT: store i64 [[TMP94]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.12, i32 0, i32 2), align 4 
+// CHECK20-NEXT: [[TMP109:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS33]], i32 0, i32 2 +// CHECK20-NEXT: store i8* null, i8** [[TMP109]], align 4 +// CHECK20-NEXT: [[TMP110:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS31]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP111:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS32]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP112:%.*]] = load i32, i32* [[N]], align 4 +// CHECK20-NEXT: store i32 [[TMP112]], i32* [[DOTCAPTURE_EXPR_35]], align 4 +// CHECK20-NEXT: [[TMP113:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_35]], align 4 +// CHECK20-NEXT: [[SUB37:%.*]] = sub nsw i32 [[TMP113]], 0 +// CHECK20-NEXT: [[DIV38:%.*]] = sdiv i32 [[SUB37]], 1 +// CHECK20-NEXT: [[SUB39:%.*]] = sub nsw i32 [[DIV38]], 1 +// CHECK20-NEXT: store i32 [[SUB39]], i32* [[DOTCAPTURE_EXPR_36]], align 4 +// CHECK20-NEXT: [[TMP114:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_36]], align 4 +// CHECK20-NEXT: [[ADD40:%.*]] = add nsw i32 [[TMP114]], 1 +// CHECK20-NEXT: [[TMP115:%.*]] = zext i32 [[ADD40]] to i64 +// CHECK20-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP115]]) +// CHECK20-NEXT: [[TMP116:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151.region_id, i32 3, i8** [[TMP110]], i8** [[TMP111]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK20-NEXT: [[TMP117:%.*]] = icmp ne i32 [[TMP116]], 0 +// CHECK20-NEXT: br i1 [[TMP117]], label [[OMP_OFFLOAD_FAILED41:%.*]], label [[OMP_OFFLOAD_CONT42:%.*]] +// CHECK20: omp_offload.failed41: +// CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l151(i32 [[TMP92]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] +// CHECK20-NEXT: br label [[OMP_OFFLOAD_CONT42]] +// CHECK20: omp_offload.cont42: +// CHECK20-NEXT: [[TMP118:%.*]] = load i32, i32* [[M]], align 4 +// CHECK20-NEXT: store i32 [[TMP118]], i32* [[DOTCAPTURE_EXPR_43]], align 4 +// CHECK20-NEXT: [[TMP119:%.*]] = load i32, i32* [[N]], align 4 +// CHECK20-NEXT: store i32 [[TMP119]], i32* [[N_CASTED44]], align 4 +// CHECK20-NEXT: [[TMP120:%.*]] = load i32, i32* [[N_CASTED44]], align 4 +// CHECK20-NEXT: [[TMP121:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_43]], align 4 +// CHECK20-NEXT: store i32 [[TMP121]], i32* [[DOTCAPTURE_EXPR__CASTED45]], align 4 +// CHECK20-NEXT: [[TMP122:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED45]], align 4 +// CHECK20-NEXT: [[TMP123:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK20-NEXT: [[TMP124:%.*]] = sext i32 [[TMP123]] to i64 +// CHECK20-NEXT: [[TMP125:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS46]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP126:%.*]] = bitcast i8** [[TMP125]] to i32* +// CHECK20-NEXT: store i32 [[TMP120]], i32* [[TMP126]], align 4 +// CHECK20-NEXT: [[TMP127:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS47]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP128:%.*]] = bitcast i8** [[TMP127]] to i32* +// CHECK20-NEXT: store i32 [[TMP120]], i32* [[TMP128]], align 4 +// CHECK20-NEXT: [[TMP129:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS48]], i32 0, i32 0 +// CHECK20-NEXT: store i8* null, i8** [[TMP129]], align 4 +// CHECK20-NEXT: [[TMP130:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* 
[[DOTOFFLOAD_BASEPTRS46]], i32 0, i32 1 +// CHECK20-NEXT: [[TMP131:%.*]] = bitcast i8** [[TMP130]] to i32* +// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP131]], align 4 +// CHECK20-NEXT: [[TMP132:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS47]], i32 0, i32 1 +// CHECK20-NEXT: [[TMP133:%.*]] = bitcast i8** [[TMP132]] to i32* +// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP133]], align 4 +// CHECK20-NEXT: [[TMP134:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS48]], i32 0, i32 1 +// CHECK20-NEXT: store i8* null, i8** [[TMP134]], align 4 +// CHECK20-NEXT: [[TMP135:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS46]], i32 0, i32 2 +// CHECK20-NEXT: [[TMP136:%.*]] = bitcast i8** [[TMP135]] to i32** +// CHECK20-NEXT: store i32* [[VLA]], i32** [[TMP136]], align 4 +// CHECK20-NEXT: [[TMP137:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS47]], i32 0, i32 2 +// CHECK20-NEXT: [[TMP138:%.*]] = bitcast i8** [[TMP137]] to i32** +// CHECK20-NEXT: store i32* [[VLA]], i32** [[TMP138]], align 4 +// CHECK20-NEXT: store i64 [[TMP124]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.16, i32 0, i32 2), align 4 +// CHECK20-NEXT: [[TMP139:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS48]], i32 0, i32 2 +// CHECK20-NEXT: store i8* null, i8** [[TMP139]], align 4 +// CHECK20-NEXT: [[TMP140:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS46]], i32 0, i32 3 +// CHECK20-NEXT: [[TMP141:%.*]] = bitcast i8** [[TMP140]] to i32* +// CHECK20-NEXT: store i32 [[TMP122]], i32* [[TMP141]], align 4 +// CHECK20-NEXT: [[TMP142:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS47]], i32 0, i32 3 // CHECK20-NEXT: [[TMP143:%.*]] = bitcast i8** [[TMP142]] to i32* -// CHECK20-NEXT: store i32 [[TMP137]], i32* [[TMP143]], align 4 -// CHECK20-NEXT: [[TMP144:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS50]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP145:%.*]] = bitcast i8** [[TMP144]] to i32* -// CHECK20-NEXT: store i32 [[TMP137]], i32* [[TMP145]], align 4 -// CHECK20-NEXT: [[TMP146:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES52]], i32 0, i32 0 -// CHECK20-NEXT: store i64 4, i64* [[TMP146]], align 4 -// CHECK20-NEXT: [[TMP147:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS51]], i32 0, i32 0 -// CHECK20-NEXT: store i8* null, i8** [[TMP147]], align 4 -// CHECK20-NEXT: [[TMP148:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS49]], i32 0, i32 1 -// CHECK20-NEXT: [[TMP149:%.*]] = bitcast i8** [[TMP148]] to i32* -// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP149]], align 4 -// CHECK20-NEXT: [[TMP150:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS50]], i32 0, i32 1 -// CHECK20-NEXT: [[TMP151:%.*]] = bitcast i8** [[TMP150]] to i32* -// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP151]], align 4 -// CHECK20-NEXT: [[TMP152:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES52]], i32 0, i32 1 -// CHECK20-NEXT: store i64 4, i64* [[TMP152]], align 4 -// CHECK20-NEXT: [[TMP153:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS51]], i32 0, i32 1 -// CHECK20-NEXT: store i8* null, i8** [[TMP153]], align 4 -// CHECK20-NEXT: [[TMP154:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS49]], i32 0, i32 2 -// CHECK20-NEXT: [[TMP155:%.*]] = bitcast i8** [[TMP154]] to i32** -// 
CHECK20-NEXT: store i32* [[VLA]], i32** [[TMP155]], align 4 -// CHECK20-NEXT: [[TMP156:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS50]], i32 0, i32 2 -// CHECK20-NEXT: [[TMP157:%.*]] = bitcast i8** [[TMP156]] to i32** -// CHECK20-NEXT: store i32* [[VLA]], i32** [[TMP157]], align 4 -// CHECK20-NEXT: [[TMP158:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES52]], i32 0, i32 2 -// CHECK20-NEXT: store i64 [[TMP141]], i64* [[TMP158]], align 4 -// CHECK20-NEXT: [[TMP159:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS51]], i32 0, i32 2 -// CHECK20-NEXT: store i8* null, i8** [[TMP159]], align 4 -// CHECK20-NEXT: [[TMP160:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS49]], i32 0, i32 3 -// CHECK20-NEXT: [[TMP161:%.*]] = bitcast i8** [[TMP160]] to i32* -// CHECK20-NEXT: store i32 [[TMP139]], i32* [[TMP161]], align 4 -// CHECK20-NEXT: [[TMP162:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS50]], i32 0, i32 3 -// CHECK20-NEXT: [[TMP163:%.*]] = bitcast i8** [[TMP162]] to i32* -// CHECK20-NEXT: store i32 [[TMP139]], i32* [[TMP163]], align 4 -// CHECK20-NEXT: [[TMP164:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES52]], i32 0, i32 3 -// CHECK20-NEXT: store i64 4, i64* [[TMP164]], align 4 -// CHECK20-NEXT: [[TMP165:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS51]], i32 0, i32 3 -// CHECK20-NEXT: store i8* null, i8** [[TMP165]], align 4 -// CHECK20-NEXT: [[TMP166:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS49]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP167:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS50]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP168:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES52]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP169:%.*]] = load i32, i32* [[N]], align 4 -// CHECK20-NEXT: store i32 [[TMP169]], i32* [[DOTCAPTURE_EXPR_54]], align 4 -// CHECK20-NEXT: [[TMP170:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_54]], align 4 -// CHECK20-NEXT: [[SUB56:%.*]] = sub nsw i32 [[TMP170]], 0 -// CHECK20-NEXT: [[DIV57:%.*]] = sdiv i32 [[SUB56]], 1 -// CHECK20-NEXT: [[SUB58:%.*]] = sub nsw i32 [[DIV57]], 1 -// CHECK20-NEXT: store i32 [[SUB58]], i32* [[DOTCAPTURE_EXPR_55]], align 4 -// CHECK20-NEXT: [[TMP171:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_55]], align 4 -// CHECK20-NEXT: [[ADD59:%.*]] = add nsw i32 [[TMP171]], 1 -// CHECK20-NEXT: [[TMP172:%.*]] = zext i32 [[ADD59]] to i64 -// CHECK20-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP172]]) -// CHECK20-NEXT: [[TMP173:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155.region_id, i32 4, i8** [[TMP166]], i8** [[TMP167]], i64* [[TMP168]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK20-NEXT: [[TMP174:%.*]] = icmp ne i32 [[TMP173]], 0 -// CHECK20-NEXT: br i1 [[TMP174]], label [[OMP_OFFLOAD_FAILED60:%.*]], label [[OMP_OFFLOAD_CONT61:%.*]] -// CHECK20: omp_offload.failed60: -// CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155(i32 [[TMP137]], i32 [[TMP0]], i32* [[VLA]], i32 [[TMP139]]) #[[ATTR3]] -// CHECK20-NEXT: br label [[OMP_OFFLOAD_CONT61]] -// CHECK20: omp_offload.cont61: -// CHECK20-NEXT: [[TMP175:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 -// CHECK20-NEXT: 
[[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP175]]) +// CHECK20-NEXT: store i32 [[TMP122]], i32* [[TMP143]], align 4 +// CHECK20-NEXT: [[TMP144:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS48]], i32 0, i32 3 +// CHECK20-NEXT: store i8* null, i8** [[TMP144]], align 4 +// CHECK20-NEXT: [[TMP145:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS46]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP146:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS47]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP147:%.*]] = load i32, i32* [[N]], align 4 +// CHECK20-NEXT: store i32 [[TMP147]], i32* [[DOTCAPTURE_EXPR_50]], align 4 +// CHECK20-NEXT: [[TMP148:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_50]], align 4 +// CHECK20-NEXT: [[SUB52:%.*]] = sub nsw i32 [[TMP148]], 0 +// CHECK20-NEXT: [[DIV53:%.*]] = sdiv i32 [[SUB52]], 1 +// CHECK20-NEXT: [[SUB54:%.*]] = sub nsw i32 [[DIV53]], 1 +// CHECK20-NEXT: store i32 [[SUB54]], i32* [[DOTCAPTURE_EXPR_51]], align 4 +// CHECK20-NEXT: [[TMP149:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_51]], align 4 +// CHECK20-NEXT: [[ADD55:%.*]] = add nsw i32 [[TMP149]], 1 +// CHECK20-NEXT: [[TMP150:%.*]] = zext i32 [[ADD55]] to i64 +// CHECK20-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP150]]) +// CHECK20-NEXT: [[TMP151:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155.region_id, i32 4, i8** [[TMP145]], i8** [[TMP146]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.16, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK20-NEXT: [[TMP152:%.*]] = icmp ne i32 [[TMP151]], 0 +// CHECK20-NEXT: br i1 [[TMP152]], label [[OMP_OFFLOAD_FAILED56:%.*]], label [[OMP_OFFLOAD_CONT57:%.*]] +// CHECK20: omp_offload.failed56: +// CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l155(i32 [[TMP120]], i32 [[TMP0]], i32* [[VLA]], i32 [[TMP122]]) #[[ATTR3]] +// CHECK20-NEXT: br label [[OMP_OFFLOAD_CONT57]] +// CHECK20: omp_offload.cont57: +// CHECK20-NEXT: [[TMP153:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 +// CHECK20-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP153]]) // CHECK20-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK20-NEXT: [[TMP176:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK20-NEXT: call void @llvm.stackrestore(i8* [[TMP176]]) -// CHECK20-NEXT: [[TMP177:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK20-NEXT: ret i32 [[TMP177]] +// CHECK20-NEXT: [[TMP154:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK20-NEXT: call void @llvm.stackrestore(i8* [[TMP154]]) +// CHECK20-NEXT: [[TMP155:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK20-NEXT: ret i32 [[TMP155]] // // // CHECK20-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l139 @@ -28261,11 +27949,11 @@ // CHECK20-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK20-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK20-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP5]]) +// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP5]]) // CHECK20-NEXT: ret void // // -// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..5 +// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..6 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK20-NEXT: entry: // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -28346,7 +28034,7 @@ // CHECK20-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4, !llvm.access.group !29 // CHECK20-NEXT: store i32 [[TMP20]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !29 // CHECK20-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !29 -// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*, i32)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32 [[TMP16]], i32 [[TMP17]], i32 [[TMP19]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP21]]), !llvm.access.group !29 +// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*, i32)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i32 [[TMP16]], i32 [[TMP17]], i32 [[TMP19]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP21]]), !llvm.access.group !29 // CHECK20-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK20: omp.inner.for.inc: // CHECK20-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29 @@ -28400,7 +28088,7 @@ // CHECK20-NEXT: ret void // // -// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..6 +// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..7 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32 noundef [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK20-NEXT: entry: // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -28530,11 +28218,11 @@ // CHECK20-NEXT: [[TMP2:%.*]] = load i32, i32* [[N_ADDR]], align 4 // CHECK20-NEXT: store i32 [[TMP2]], i32* [[N_CASTED]], align 4 // CHECK20-NEXT: [[TMP3:%.*]] = load i32, i32* [[N_CASTED]], align 4 -// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*)* @.omp_outlined..8 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]]) +// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]]) // CHECK20-NEXT: ret void // // -// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..8 +// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..10 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] { // CHECK20-NEXT: entry: // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -28607,7 +28295,7 @@ // CHECK20-NEXT: [[TMP17:%.*]] = load i32, i32* [[N_ADDR]], align 4, !llvm.access.group !35 // CHECK20-NEXT: store i32 [[TMP17]], i32* [[N_CASTED]], align 4, !llvm.access.group !35 // CHECK20-NEXT: [[TMP18:%.*]] = load i32, i32* [[N_CASTED]], align 4, !llvm.access.group !35 -// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), i32 [[TMP15]], i32 [[TMP16]], i32 [[TMP18]], i32 [[TMP0]], i32* [[TMP1]]), !llvm.access.group !35 +// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP15]], i32 [[TMP16]], i32 [[TMP18]], i32 [[TMP0]], i32* [[TMP1]]), !llvm.access.group !35 // CHECK20-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK20: omp.inner.for.inc: // CHECK20-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35 @@ -28638,7 +28326,7 @@ // CHECK20-NEXT: ret void // // -// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..9 +// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..11 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32 noundef [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] { // CHECK20-NEXT: entry: // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -28768,11 +28456,11 @@ // CHECK20-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK20-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK20-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP5]]) +// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP5]]) // CHECK20-NEXT: ret void // // -// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..14 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK20-NEXT: entry: // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -28851,7 +28539,7 @@ // CHECK20-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4, !llvm.access.group !41 // CHECK20-NEXT: store i32 [[TMP19]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !41 // CHECK20-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !41 -// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*, i32)* @.omp_outlined..12 to void (i32*, i32*, ...)*), i32 [[TMP15]], i32 [[TMP16]], i32 [[TMP18]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP20]]), !llvm.access.group !41 +// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, i32*, i32)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i32 [[TMP15]], i32 [[TMP16]], i32 [[TMP18]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP20]]), !llvm.access.group !41 // CHECK20-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK20: omp.inner.for.inc: // CHECK20-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !41 @@ -28882,7 +28570,7 @@ // CHECK20-NEXT: ret void // // -// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..12 +// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..15 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32 noundef [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK20-NEXT: entry: // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -29037,7 +28725,7 @@ // CHECK20-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK20-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK20-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK20-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l112.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK20-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* 
@.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l112.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.20, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.21, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK20-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK20-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK20: omp_offload.failed: @@ -29055,7 +28743,7 @@ // CHECK20-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0 // CHECK20-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0 // CHECK20-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK20-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.19, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.20, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK20-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.24, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.25, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK20-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 // CHECK20-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]] // CHECK20: omp_offload.failed5: @@ -29086,7 +28774,7 @@ // CHECK20-NEXT: [[TMP31:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0 // CHECK20-NEXT: [[TMP32:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0 // CHECK20-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK20-NEXT: [[TMP33:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l120.region_id, i32 2, i8** [[TMP31]], i8** [[TMP32]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.23, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.24, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK20-NEXT: [[TMP33:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l120.region_id, i32 2, i8** [[TMP31]], i8** [[TMP32]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.28, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.29, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK20-NEXT: [[TMP34:%.*]] = icmp ne i32 [[TMP33]], 0 // CHECK20-NEXT: br i1 [[TMP34]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]] // CHECK20: omp_offload.failed11: @@ -29104,7 +28792,7 @@ // CHECK20-NEXT: [[TMP40:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 0 // CHECK20-NEXT: 
[[TMP41:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 0 // CHECK20-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK20-NEXT: [[TMP42:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l124.region_id, i32 1, i8** [[TMP40]], i8** [[TMP41]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.27, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.28, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK20-NEXT: [[TMP42:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l124.region_id, i32 1, i8** [[TMP40]], i8** [[TMP41]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.32, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.33, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK20-NEXT: [[TMP43:%.*]] = icmp ne i32 [[TMP42]], 0 // CHECK20-NEXT: br i1 [[TMP43]], label [[OMP_OFFLOAD_FAILED17:%.*]], label [[OMP_OFFLOAD_CONT18:%.*]] // CHECK20: omp_offload.failed17: @@ -29135,7 +28823,7 @@ // CHECK20-NEXT: [[TMP57:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0 // CHECK20-NEXT: [[TMP58:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0 // CHECK20-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK20-NEXT: [[TMP59:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l128.region_id, i32 2, i8** [[TMP57]], i8** [[TMP58]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.31, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.32, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK20-NEXT: [[TMP59:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l128.region_id, i32 2, i8** [[TMP57]], i8** [[TMP58]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.36, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.37, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK20-NEXT: [[TMP60:%.*]] = icmp ne i32 [[TMP59]], 0 // CHECK20-NEXT: br i1 [[TMP60]], label [[OMP_OFFLOAD_FAILED25:%.*]], label [[OMP_OFFLOAD_CONT26:%.*]] // CHECK20: omp_offload.failed25: @@ -29151,11 +28839,11 @@ // CHECK20-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK20-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 // CHECK20-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 -// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK20-NEXT: ret void // // -// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..14 +// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..18 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK20-NEXT: entry: // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -29201,7 +28889,7 @@ // CHECK20: omp.inner.for.body: // CHECK20-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !47 // CHECK20-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !47 -// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]), !llvm.access.group !47 +// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..19 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]), !llvm.access.group !47 // CHECK20-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK20: omp.inner.for.inc: // CHECK20-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47 @@ -29223,7 +28911,7 @@ // CHECK20-NEXT: ret void // // -// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..15 +// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..19 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK20-NEXT: entry: // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -29310,11 +28998,11 @@ // CHECK20-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK20-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 // CHECK20-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 -// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..17 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..22 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK20-NEXT: ret void // // -// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..17 +// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..22 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK20-NEXT: entry: // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -29360,7 +29048,7 @@ // CHECK20: omp.inner.for.body: // CHECK20-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !53 // CHECK20-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !53 -// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]), !llvm.access.group !53 +// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..23 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]), !llvm.access.group !53 // CHECK20-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK20: omp.inner.for.inc: // CHECK20-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !53 @@ -29382,7 +29070,7 @@ // CHECK20-NEXT: ret void // // -// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..18 +// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..23 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK20-NEXT: entry: // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -29475,11 +29163,11 @@ // CHECK20-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK20-NEXT: store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK20-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..21 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP2]]) +// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..26 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP2]]) // CHECK20-NEXT: ret void // // -// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..21 +// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..26 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK20-NEXT: entry: // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -29531,7 +29219,7 @@ // CHECK20-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4, !llvm.access.group !59 // CHECK20-NEXT: store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !59 // CHECK20-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !59 -// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..22 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]]), !llvm.access.group !59 +// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..27 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]]), !llvm.access.group !59 // CHECK20-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK20: omp.inner.for.inc: // CHECK20-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !59 @@ -29553,7 +29241,7 @@ // CHECK20-NEXT: ret void // // -// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..22 +// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..27 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK20-NEXT: entry: // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -29662,11 +29350,11 @@ // CHECK20-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK20-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 // CHECK20-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 -// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..25 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..30 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK20-NEXT: ret void // // -// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..25 +// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..30 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK20-NEXT: entry: // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -29712,7 +29400,7 @@ // CHECK20: omp.inner.for.body: // CHECK20-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !65 // CHECK20-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !65 -// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..26 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]), !llvm.access.group !65 +// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..31 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]), !llvm.access.group !65 // CHECK20-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK20: omp.inner.for.inc: // CHECK20-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !65 @@ -29734,7 +29422,7 @@ // CHECK20-NEXT: ret void // // -// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..26 +// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..31 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK20-NEXT: entry: // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -29825,11 +29513,11 @@ // CHECK20-NEXT: [[TMP1:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK20-NEXT: store i32 [[TMP1]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK20-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..29 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP2]]) +// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..34 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP2]]) // CHECK20-NEXT: ret void // // -// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..29 +// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..34 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK20-NEXT: entry: // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -29881,7 +29569,7 @@ // CHECK20-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4, !llvm.access.group !71 // CHECK20-NEXT: store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !71 // CHECK20-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !71 -// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..30 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]]), !llvm.access.group !71 +// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..35 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]]), !llvm.access.group !71 // CHECK20-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK20: omp.inner.for.inc: // CHECK20-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !71 @@ -29903,7 +29591,7 @@ // CHECK20-NEXT: ret void // // -// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..30 +// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..35 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK20-NEXT: entry: // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 diff --git a/clang/test/OpenMP/target_teams_distribute_simd_codegen.cpp b/clang/test/OpenMP/target_teams_distribute_simd_codegen.cpp --- a/clang/test/OpenMP/target_teams_distribute_simd_codegen.cpp +++ b/clang/test/OpenMP/target_teams_distribute_simd_codegen.cpp @@ -330,7 +330,6 @@ // CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS26:%.*]] = alloca [9 x i8*], align 8 // CHECK1-NEXT: [[DOTOFFLOAD_PTRS27:%.*]] = alloca [9 x i8*], align 8 // CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS28:%.*]] = alloca [9 x i8*], align 8 -// CHECK1-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [9 x i64], align 8 // CHECK1-NEXT: [[_TMP29:%.*]] = alloca i32, align 4 // CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]]) // CHECK1-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 @@ -509,97 +508,80 @@ // CHECK1-NEXT: [[TMP97:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 0 // CHECK1-NEXT: [[TMP98:%.*]] = bitcast i8** [[TMP97]] to i64* // CHECK1-NEXT: store i64 [[TMP90]], i64* [[TMP98]], align 8 -// CHECK1-NEXT: [[TMP99:%.*]] = getelementptr inbounds [9 x 
i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK1-NEXT: store i64 4, i64* [[TMP99]], align 8 -// CHECK1-NEXT: [[TMP100:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 0 -// CHECK1-NEXT: store i8* null, i8** [[TMP100]], align 8 -// CHECK1-NEXT: [[TMP101:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 1 -// CHECK1-NEXT: [[TMP102:%.*]] = bitcast i8** [[TMP101]] to [10 x float]** -// CHECK1-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP102]], align 8 -// CHECK1-NEXT: [[TMP103:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 1 -// CHECK1-NEXT: [[TMP104:%.*]] = bitcast i8** [[TMP103]] to [10 x float]** -// CHECK1-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP104]], align 8 -// CHECK1-NEXT: [[TMP105:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK1-NEXT: store i64 40, i64* [[TMP105]], align 8 -// CHECK1-NEXT: [[TMP106:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 1 -// CHECK1-NEXT: store i8* null, i8** [[TMP106]], align 8 -// CHECK1-NEXT: [[TMP107:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 2 +// CHECK1-NEXT: [[TMP99:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 0 +// CHECK1-NEXT: store i8* null, i8** [[TMP99]], align 8 +// CHECK1-NEXT: [[TMP100:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 1 +// CHECK1-NEXT: [[TMP101:%.*]] = bitcast i8** [[TMP100]] to [10 x float]** +// CHECK1-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP101]], align 8 +// CHECK1-NEXT: [[TMP102:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 1 +// CHECK1-NEXT: [[TMP103:%.*]] = bitcast i8** [[TMP102]] to [10 x float]** +// CHECK1-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP103]], align 8 +// CHECK1-NEXT: [[TMP104:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 1 +// CHECK1-NEXT: store i8* null, i8** [[TMP104]], align 8 +// CHECK1-NEXT: [[TMP105:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 2 +// CHECK1-NEXT: [[TMP106:%.*]] = bitcast i8** [[TMP105]] to i64* +// CHECK1-NEXT: store i64 [[TMP2]], i64* [[TMP106]], align 8 +// CHECK1-NEXT: [[TMP107:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 2 // CHECK1-NEXT: [[TMP108:%.*]] = bitcast i8** [[TMP107]] to i64* // CHECK1-NEXT: store i64 [[TMP2]], i64* [[TMP108]], align 8 -// CHECK1-NEXT: [[TMP109:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 2 -// CHECK1-NEXT: [[TMP110:%.*]] = bitcast i8** [[TMP109]] to i64* -// CHECK1-NEXT: store i64 [[TMP2]], i64* [[TMP110]], align 8 -// CHECK1-NEXT: [[TMP111:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK1-NEXT: store i64 8, i64* [[TMP111]], align 8 -// CHECK1-NEXT: [[TMP112:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 2 -// CHECK1-NEXT: store i8* null, i8** [[TMP112]], align 8 -// CHECK1-NEXT: [[TMP113:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 3 -// CHECK1-NEXT: [[TMP114:%.*]] = bitcast i8** [[TMP113]] to float** -// CHECK1-NEXT: store float* [[VLA]], float** [[TMP114]], align 8 -// CHECK1-NEXT: [[TMP115:%.*]] = 
getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 3 -// CHECK1-NEXT: [[TMP116:%.*]] = bitcast i8** [[TMP115]] to float** -// CHECK1-NEXT: store float* [[VLA]], float** [[TMP116]], align 8 -// CHECK1-NEXT: [[TMP117:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK1-NEXT: store i64 [[TMP92]], i64* [[TMP117]], align 8 -// CHECK1-NEXT: [[TMP118:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 3 -// CHECK1-NEXT: store i8* null, i8** [[TMP118]], align 8 -// CHECK1-NEXT: [[TMP119:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 4 -// CHECK1-NEXT: [[TMP120:%.*]] = bitcast i8** [[TMP119]] to [5 x [10 x double]]** -// CHECK1-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP120]], align 8 -// CHECK1-NEXT: [[TMP121:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 4 -// CHECK1-NEXT: [[TMP122:%.*]] = bitcast i8** [[TMP121]] to [5 x [10 x double]]** -// CHECK1-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP122]], align 8 -// CHECK1-NEXT: [[TMP123:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK1-NEXT: store i64 400, i64* [[TMP123]], align 8 -// CHECK1-NEXT: [[TMP124:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 4 +// CHECK1-NEXT: [[TMP109:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 2 +// CHECK1-NEXT: store i8* null, i8** [[TMP109]], align 8 +// CHECK1-NEXT: [[TMP110:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 3 +// CHECK1-NEXT: [[TMP111:%.*]] = bitcast i8** [[TMP110]] to float** +// CHECK1-NEXT: store float* [[VLA]], float** [[TMP111]], align 8 +// CHECK1-NEXT: [[TMP112:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 3 +// CHECK1-NEXT: [[TMP113:%.*]] = bitcast i8** [[TMP112]] to float** +// CHECK1-NEXT: store float* [[VLA]], float** [[TMP113]], align 8 +// CHECK1-NEXT: store i64 [[TMP92]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_sizes.10, i32 0, i32 3), align 8 +// CHECK1-NEXT: [[TMP114:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 3 +// CHECK1-NEXT: store i8* null, i8** [[TMP114]], align 8 +// CHECK1-NEXT: [[TMP115:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 4 +// CHECK1-NEXT: [[TMP116:%.*]] = bitcast i8** [[TMP115]] to [5 x [10 x double]]** +// CHECK1-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP116]], align 8 +// CHECK1-NEXT: [[TMP117:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 4 +// CHECK1-NEXT: [[TMP118:%.*]] = bitcast i8** [[TMP117]] to [5 x [10 x double]]** +// CHECK1-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP118]], align 8 +// CHECK1-NEXT: [[TMP119:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 4 +// CHECK1-NEXT: store i8* null, i8** [[TMP119]], align 8 +// CHECK1-NEXT: [[TMP120:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 5 +// CHECK1-NEXT: [[TMP121:%.*]] = bitcast i8** [[TMP120]] to i64* +// CHECK1-NEXT: store i64 5, i64* [[TMP121]], align 8 +// CHECK1-NEXT: [[TMP122:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 5 
+// CHECK1-NEXT: [[TMP123:%.*]] = bitcast i8** [[TMP122]] to i64* +// CHECK1-NEXT: store i64 5, i64* [[TMP123]], align 8 +// CHECK1-NEXT: [[TMP124:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 5 // CHECK1-NEXT: store i8* null, i8** [[TMP124]], align 8 -// CHECK1-NEXT: [[TMP125:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 5 +// CHECK1-NEXT: [[TMP125:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 6 // CHECK1-NEXT: [[TMP126:%.*]] = bitcast i8** [[TMP125]] to i64* -// CHECK1-NEXT: store i64 5, i64* [[TMP126]], align 8 -// CHECK1-NEXT: [[TMP127:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 5 +// CHECK1-NEXT: store i64 [[TMP5]], i64* [[TMP126]], align 8 +// CHECK1-NEXT: [[TMP127:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 6 // CHECK1-NEXT: [[TMP128:%.*]] = bitcast i8** [[TMP127]] to i64* -// CHECK1-NEXT: store i64 5, i64* [[TMP128]], align 8 -// CHECK1-NEXT: [[TMP129:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5 -// CHECK1-NEXT: store i64 8, i64* [[TMP129]], align 8 -// CHECK1-NEXT: [[TMP130:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 5 -// CHECK1-NEXT: store i8* null, i8** [[TMP130]], align 8 -// CHECK1-NEXT: [[TMP131:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 6 -// CHECK1-NEXT: [[TMP132:%.*]] = bitcast i8** [[TMP131]] to i64* -// CHECK1-NEXT: store i64 [[TMP5]], i64* [[TMP132]], align 8 -// CHECK1-NEXT: [[TMP133:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 6 -// CHECK1-NEXT: [[TMP134:%.*]] = bitcast i8** [[TMP133]] to i64* -// CHECK1-NEXT: store i64 [[TMP5]], i64* [[TMP134]], align 8 -// CHECK1-NEXT: [[TMP135:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6 -// CHECK1-NEXT: store i64 8, i64* [[TMP135]], align 8 -// CHECK1-NEXT: [[TMP136:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 6 -// CHECK1-NEXT: store i8* null, i8** [[TMP136]], align 8 -// CHECK1-NEXT: [[TMP137:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 7 -// CHECK1-NEXT: [[TMP138:%.*]] = bitcast i8** [[TMP137]] to double** -// CHECK1-NEXT: store double* [[VLA1]], double** [[TMP138]], align 8 -// CHECK1-NEXT: [[TMP139:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 7 -// CHECK1-NEXT: [[TMP140:%.*]] = bitcast i8** [[TMP139]] to double** -// CHECK1-NEXT: store double* [[VLA1]], double** [[TMP140]], align 8 -// CHECK1-NEXT: [[TMP141:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7 -// CHECK1-NEXT: store i64 [[TMP94]], i64* [[TMP141]], align 8 -// CHECK1-NEXT: [[TMP142:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 7 -// CHECK1-NEXT: store i8* null, i8** [[TMP142]], align 8 -// CHECK1-NEXT: [[TMP143:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 8 -// CHECK1-NEXT: [[TMP144:%.*]] = bitcast i8** [[TMP143]] to %struct.TT** -// CHECK1-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP144]], align 8 -// CHECK1-NEXT: [[TMP145:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 8 -// CHECK1-NEXT: [[TMP146:%.*]] = bitcast i8** [[TMP145]] 
to %struct.TT** -// CHECK1-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP146]], align 8 -// CHECK1-NEXT: [[TMP147:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 8 -// CHECK1-NEXT: store i64 16, i64* [[TMP147]], align 8 -// CHECK1-NEXT: [[TMP148:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 8 -// CHECK1-NEXT: store i8* null, i8** [[TMP148]], align 8 -// CHECK1-NEXT: [[TMP149:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP150:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP151:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 +// CHECK1-NEXT: store i64 [[TMP5]], i64* [[TMP128]], align 8 +// CHECK1-NEXT: [[TMP129:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 6 +// CHECK1-NEXT: store i8* null, i8** [[TMP129]], align 8 +// CHECK1-NEXT: [[TMP130:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 7 +// CHECK1-NEXT: [[TMP131:%.*]] = bitcast i8** [[TMP130]] to double** +// CHECK1-NEXT: store double* [[VLA1]], double** [[TMP131]], align 8 +// CHECK1-NEXT: [[TMP132:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 7 +// CHECK1-NEXT: [[TMP133:%.*]] = bitcast i8** [[TMP132]] to double** +// CHECK1-NEXT: store double* [[VLA1]], double** [[TMP133]], align 8 +// CHECK1-NEXT: store i64 [[TMP94]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_sizes.10, i32 0, i32 7), align 8 +// CHECK1-NEXT: [[TMP134:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 7 +// CHECK1-NEXT: store i8* null, i8** [[TMP134]], align 8 +// CHECK1-NEXT: [[TMP135:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 8 +// CHECK1-NEXT: [[TMP136:%.*]] = bitcast i8** [[TMP135]] to %struct.TT** +// CHECK1-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP136]], align 8 +// CHECK1-NEXT: [[TMP137:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 8 +// CHECK1-NEXT: [[TMP138:%.*]] = bitcast i8** [[TMP137]] to %struct.TT** +// CHECK1-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP138]], align 8 +// CHECK1-NEXT: [[TMP139:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 8 +// CHECK1-NEXT: store i8* null, i8** [[TMP139]], align 8 +// CHECK1-NEXT: [[TMP140:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP141:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 0 // CHECK1-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK1-NEXT: [[TMP152:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142.region_id, i32 9, i8** [[TMP149]], i8** [[TMP150]], i64* [[TMP151]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) -// CHECK1-NEXT: [[TMP153:%.*]] = icmp ne i32 [[TMP152]], 0 -// CHECK1-NEXT: br i1 [[TMP153]], label [[OMP_OFFLOAD_FAILED30:%.*]], label [[OMP_OFFLOAD_CONT31:%.*]] +// CHECK1-NEXT: [[TMP142:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* 
@.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142.region_id, i32 9, i8** [[TMP140]], i8** [[TMP141]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_sizes.10, i32 0, i32 0), i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_maptypes.11, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK1-NEXT: [[TMP143:%.*]] = icmp ne i32 [[TMP142]], 0 +// CHECK1-NEXT: br i1 [[TMP143]], label [[OMP_OFFLOAD_FAILED30:%.*]], label [[OMP_OFFLOAD_CONT31:%.*]] // CHECK1: omp_offload.failed30: // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142(i64 [[TMP90]], [10 x float]* [[B]], i64 [[TMP2]], float* [[VLA]], [5 x [10 x double]]* [[C]], i64 5, i64 [[TMP5]], double* [[VLA1]], %struct.TT* [[D]]) #[[ATTR4]] // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT31]] @@ -609,10 +591,10 @@ // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142(i64 [[TMP90]], [10 x float]* [[B]], i64 [[TMP2]], float* [[VLA]], [5 x [10 x double]]* [[C]], i64 5, i64 [[TMP5]], double* [[VLA1]], %struct.TT* [[D]]) #[[ATTR4]] // CHECK1-NEXT: br label [[OMP_IF_END33]] // CHECK1: omp_if.end33: -// CHECK1-NEXT: [[TMP154:%.*]] = load i32, i32* [[A]], align 4 -// CHECK1-NEXT: [[TMP155:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK1-NEXT: call void @llvm.stackrestore(i8* [[TMP155]]) -// CHECK1-NEXT: ret i32 [[TMP154]] +// CHECK1-NEXT: [[TMP144:%.*]] = load i32, i32* [[A]], align 4 +// CHECK1-NEXT: [[TMP145:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK1-NEXT: call void @llvm.stackrestore(i8* [[TMP145]]) +// CHECK1-NEXT: ret i32 [[TMP144]] // // // CHECK1-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l97 @@ -1314,7 +1296,6 @@ // CHECK1-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 8 // CHECK1-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 8 // CHECK1-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 8 -// CHECK1-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 8 // CHECK1-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK1-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 // CHECK1-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 @@ -1346,57 +1327,47 @@ // CHECK1-NEXT: [[TMP12:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK1-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to double** // CHECK1-NEXT: store double* [[A]], double** [[TMP13]], align 8 -// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK1-NEXT: store i64 8, i64* [[TMP14]], align 8 -// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK1-NEXT: store i8* null, i8** [[TMP15]], align 8 -// CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK1-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i64* -// CHECK1-NEXT: store i64 [[TMP6]], i64* [[TMP17]], align 8 -// CHECK1-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK1-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i64* -// CHECK1-NEXT: store i64 [[TMP6]], i64* [[TMP19]], align 8 -// CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK1-NEXT: store i64 4, i64* [[TMP20]], align 8 -// CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 
0, i64 1 -// CHECK1-NEXT: store i8* null, i8** [[TMP21]], align 8 -// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK1-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK1-NEXT: store i8* null, i8** [[TMP14]], align 8 +// CHECK1-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK1-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i64* +// CHECK1-NEXT: store i64 [[TMP6]], i64* [[TMP16]], align 8 +// CHECK1-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK1-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i64* +// CHECK1-NEXT: store i64 [[TMP6]], i64* [[TMP18]], align 8 +// CHECK1-NEXT: [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK1-NEXT: store i8* null, i8** [[TMP19]], align 8 +// CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK1-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i64* +// CHECK1-NEXT: store i64 2, i64* [[TMP21]], align 8 +// CHECK1-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK1-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i64* // CHECK1-NEXT: store i64 2, i64* [[TMP23]], align 8 -// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK1-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i64* -// CHECK1-NEXT: store i64 2, i64* [[TMP25]], align 8 -// CHECK1-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK1-NEXT: store i64 8, i64* [[TMP26]], align 8 -// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK1-NEXT: store i8* null, i8** [[TMP27]], align 8 -// CHECK1-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK1-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i64* -// CHECK1-NEXT: store i64 [[TMP2]], i64* [[TMP29]], align 8 -// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK1-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i64* -// CHECK1-NEXT: store i64 [[TMP2]], i64* [[TMP31]], align 8 -// CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK1-NEXT: store i64 8, i64* [[TMP32]], align 8 -// CHECK1-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 -// CHECK1-NEXT: store i8* null, i8** [[TMP33]], align 8 -// CHECK1-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK1-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i16** -// CHECK1-NEXT: store i16* [[VLA]], i16** [[TMP35]], align 8 -// CHECK1-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK1-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i16** -// CHECK1-NEXT: store i16* [[VLA]], i16** [[TMP37]], align 8 -// CHECK1-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK1-NEXT: store i64 [[TMP9]], i64* [[TMP38]], align 8 -// CHECK1-NEXT: [[TMP39:%.*]] = getelementptr 
inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 -// CHECK1-NEXT: store i8* null, i8** [[TMP39]], align 8 -// CHECK1-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK1-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK1-NEXT: store i8* null, i8** [[TMP24]], align 8 +// CHECK1-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK1-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i64* +// CHECK1-NEXT: store i64 [[TMP2]], i64* [[TMP26]], align 8 +// CHECK1-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK1-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i64* +// CHECK1-NEXT: store i64 [[TMP2]], i64* [[TMP28]], align 8 +// CHECK1-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 +// CHECK1-NEXT: store i8* null, i8** [[TMP29]], align 8 +// CHECK1-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK1-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i16** +// CHECK1-NEXT: store i16* [[VLA]], i16** [[TMP31]], align 8 +// CHECK1-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK1-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i16** +// CHECK1-NEXT: store i16* [[VLA]], i16** [[TMP33]], align 8 +// CHECK1-NEXT: store i64 [[TMP9]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.13, i32 0, i32 4), align 8 +// CHECK1-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 +// CHECK1-NEXT: store i8* null, i8** [[TMP34]], align 8 +// CHECK1-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK1-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK1-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK1-NEXT: [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l215.region_id, i32 5, i8** [[TMP40]], i8** [[TMP41]], i64* [[TMP42]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) -// CHECK1-NEXT: [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0 -// CHECK1-NEXT: br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK1-NEXT: [[TMP37:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l215.region_id, i32 5, i8** [[TMP35]], i8** [[TMP36]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.13, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.14, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK1-NEXT: [[TMP38:%.*]] = icmp ne i32 [[TMP37]], 0 +// CHECK1-NEXT: br i1 [[TMP38]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK1: omp_offload.failed: // 
CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l215(%struct.S1* [[THIS1]], i64 [[TMP6]], i64 2, i64 [[TMP2]], i16* [[VLA]]) #[[ATTR4]] // CHECK1-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -1406,15 +1377,15 @@ // CHECK1-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l215(%struct.S1* [[THIS1]], i64 [[TMP6]], i64 2, i64 [[TMP2]], i16* [[VLA]]) #[[ATTR4]] // CHECK1-NEXT: br label [[OMP_IF_END]] // CHECK1: omp_if.end: -// CHECK1-NEXT: [[TMP45:%.*]] = mul nsw i64 1, [[TMP2]] -// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP45]] +// CHECK1-NEXT: [[TMP39:%.*]] = mul nsw i64 1, [[TMP2]] +// CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP39]] // CHECK1-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1 -// CHECK1-NEXT: [[TMP46:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2 -// CHECK1-NEXT: [[CONV3:%.*]] = sext i16 [[TMP46]] to i32 -// CHECK1-NEXT: [[TMP47:%.*]] = load i32, i32* [[B]], align 4 -// CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], [[TMP47]] -// CHECK1-NEXT: [[TMP48:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK1-NEXT: call void @llvm.stackrestore(i8* [[TMP48]]) +// CHECK1-NEXT: [[TMP40:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2 +// CHECK1-NEXT: [[CONV3:%.*]] = sext i16 [[TMP40]] to i32 +// CHECK1-NEXT: [[TMP41:%.*]] = load i32, i32* [[B]], align 4 +// CHECK1-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], [[TMP41]] +// CHECK1-NEXT: [[TMP42:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK1-NEXT: call void @llvm.stackrestore(i8* [[TMP42]]) // CHECK1-NEXT: ret i32 [[ADD4]] // // @@ -1519,7 +1490,7 @@ // CHECK1-NEXT: [[ADD8:%.*]] = add i32 [[TMP40]], 1 // CHECK1-NEXT: [[TMP41:%.*]] = zext i32 [[ADD8]] to i64 // CHECK1-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP41]]) -// CHECK1-NEXT: [[TMP42:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l197.region_id, i32 5, i8** [[TMP34]], i8** [[TMP35]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.14, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.15, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK1-NEXT: [[TMP42:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l197.region_id, i32 5, i8** [[TMP34]], i8** [[TMP35]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.16, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) // CHECK1-NEXT: [[TMP43:%.*]] = icmp ne i32 [[TMP42]], 0 // CHECK1-NEXT: br i1 [[TMP43]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK1: omp_offload.failed: @@ -1590,7 +1561,7 @@ // CHECK1-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK1-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK1-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK1-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l180.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], 
i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.17, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.18, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK1-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l180.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.19, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.20, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) // CHECK1-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0 // CHECK1-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK1: omp_offload.failed: @@ -1629,11 +1600,11 @@ // CHECK1-NEXT: [[CONV3:%.*]] = bitcast i64* [[B_CASTED]] to i32* // CHECK1-NEXT: store i32 [[TMP4]], i32* [[CONV3]], align 4 // CHECK1-NEXT: [[TMP5:%.*]] = load i64, i64* [[B_CASTED]], align 8 -// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], i16* [[TMP3]]) +// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*)* @.omp_outlined..12 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], i16* [[TMP3]]) // CHECK1-NEXT: ret void // // -// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..12 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i64 noundef [[B:%.*]], i64 noundef [[VLA:%.*]], i64 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR3]] { // CHECK1-NEXT: entry: // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -1767,11 +1738,11 @@ // CHECK1-NEXT: [[CONV7:%.*]] = bitcast i64* [[AAA_CASTED]] to i8* // CHECK1-NEXT: store i8 [[TMP7]], i8* [[CONV7]], align 1 // CHECK1-NEXT: [[TMP8:%.*]] = load i64, i64* [[AAA_CASTED]], align 8 -// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, [10 x i32]*)* @.omp_outlined..13 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], i64 [[TMP8]], [10 x i32]* [[TMP0]]) +// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, [10 x i32]*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], i64 [[TMP8]], [10 x i32]* [[TMP0]]) // CHECK1-NEXT: ret void // // -// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..13 +// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..15 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[N:%.*]], i64 noundef [[AA:%.*]], i64 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] { // CHECK1-NEXT: entry: // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -1933,11 +1904,11 @@ // CHECK1-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16* // CHECK1-NEXT: store i16 [[TMP3]], i16* [[CONV3]], align 2 // CHECK1-NEXT: [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8 -// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]]) +// CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]]) // CHECK1-NEXT: ret void // // -// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..16 +// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..18 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] { // CHECK1-NEXT: entry: // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -2069,7 +2040,6 @@ // CHECK2-NEXT: [[DOTOFFLOAD_BASEPTRS26:%.*]] = alloca [9 x i8*], align 8 // CHECK2-NEXT: [[DOTOFFLOAD_PTRS27:%.*]] = alloca [9 x i8*], align 8 // CHECK2-NEXT: [[DOTOFFLOAD_MAPPERS28:%.*]] = alloca [9 x i8*], align 8 -// CHECK2-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [9 x i64], align 8 // CHECK2-NEXT: [[_TMP29:%.*]] = alloca i32, align 4 // CHECK2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]]) // CHECK2-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 @@ -2248,97 +2218,80 @@ // CHECK2-NEXT: [[TMP97:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 0 // CHECK2-NEXT: [[TMP98:%.*]] = bitcast i8** [[TMP97]] to i64* // CHECK2-NEXT: store i64 [[TMP90]], i64* [[TMP98]], align 8 -// CHECK2-NEXT: [[TMP99:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK2-NEXT: store i64 4, i64* [[TMP99]], align 8 -// CHECK2-NEXT: [[TMP100:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 0 -// CHECK2-NEXT: store i8* null, i8** [[TMP100]], align 8 -// CHECK2-NEXT: [[TMP101:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 1 -// CHECK2-NEXT: [[TMP102:%.*]] = bitcast i8** [[TMP101]] to [10 x float]** -// CHECK2-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP102]], align 8 -// CHECK2-NEXT: [[TMP103:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* 
[[DOTOFFLOAD_PTRS27]], i32 0, i32 1 -// CHECK2-NEXT: [[TMP104:%.*]] = bitcast i8** [[TMP103]] to [10 x float]** -// CHECK2-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP104]], align 8 -// CHECK2-NEXT: [[TMP105:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK2-NEXT: store i64 40, i64* [[TMP105]], align 8 -// CHECK2-NEXT: [[TMP106:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 1 -// CHECK2-NEXT: store i8* null, i8** [[TMP106]], align 8 -// CHECK2-NEXT: [[TMP107:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 2 +// CHECK2-NEXT: [[TMP99:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 0 +// CHECK2-NEXT: store i8* null, i8** [[TMP99]], align 8 +// CHECK2-NEXT: [[TMP100:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 1 +// CHECK2-NEXT: [[TMP101:%.*]] = bitcast i8** [[TMP100]] to [10 x float]** +// CHECK2-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP101]], align 8 +// CHECK2-NEXT: [[TMP102:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 1 +// CHECK2-NEXT: [[TMP103:%.*]] = bitcast i8** [[TMP102]] to [10 x float]** +// CHECK2-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP103]], align 8 +// CHECK2-NEXT: [[TMP104:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 1 +// CHECK2-NEXT: store i8* null, i8** [[TMP104]], align 8 +// CHECK2-NEXT: [[TMP105:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 2 +// CHECK2-NEXT: [[TMP106:%.*]] = bitcast i8** [[TMP105]] to i64* +// CHECK2-NEXT: store i64 [[TMP2]], i64* [[TMP106]], align 8 +// CHECK2-NEXT: [[TMP107:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 2 // CHECK2-NEXT: [[TMP108:%.*]] = bitcast i8** [[TMP107]] to i64* // CHECK2-NEXT: store i64 [[TMP2]], i64* [[TMP108]], align 8 -// CHECK2-NEXT: [[TMP109:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 2 -// CHECK2-NEXT: [[TMP110:%.*]] = bitcast i8** [[TMP109]] to i64* -// CHECK2-NEXT: store i64 [[TMP2]], i64* [[TMP110]], align 8 -// CHECK2-NEXT: [[TMP111:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK2-NEXT: store i64 8, i64* [[TMP111]], align 8 -// CHECK2-NEXT: [[TMP112:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 2 -// CHECK2-NEXT: store i8* null, i8** [[TMP112]], align 8 -// CHECK2-NEXT: [[TMP113:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 3 -// CHECK2-NEXT: [[TMP114:%.*]] = bitcast i8** [[TMP113]] to float** -// CHECK2-NEXT: store float* [[VLA]], float** [[TMP114]], align 8 -// CHECK2-NEXT: [[TMP115:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 3 -// CHECK2-NEXT: [[TMP116:%.*]] = bitcast i8** [[TMP115]] to float** -// CHECK2-NEXT: store float* [[VLA]], float** [[TMP116]], align 8 -// CHECK2-NEXT: [[TMP117:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK2-NEXT: store i64 [[TMP92]], i64* [[TMP117]], align 8 -// CHECK2-NEXT: [[TMP118:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 3 -// CHECK2-NEXT: store i8* null, i8** [[TMP118]], align 8 -// CHECK2-NEXT: [[TMP119:%.*]] = getelementptr inbounds [9 x i8*], 
[9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 4 -// CHECK2-NEXT: [[TMP120:%.*]] = bitcast i8** [[TMP119]] to [5 x [10 x double]]** -// CHECK2-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP120]], align 8 -// CHECK2-NEXT: [[TMP121:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 4 -// CHECK2-NEXT: [[TMP122:%.*]] = bitcast i8** [[TMP121]] to [5 x [10 x double]]** -// CHECK2-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP122]], align 8 -// CHECK2-NEXT: [[TMP123:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK2-NEXT: store i64 400, i64* [[TMP123]], align 8 -// CHECK2-NEXT: [[TMP124:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 4 +// CHECK2-NEXT: [[TMP109:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 2 +// CHECK2-NEXT: store i8* null, i8** [[TMP109]], align 8 +// CHECK2-NEXT: [[TMP110:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 3 +// CHECK2-NEXT: [[TMP111:%.*]] = bitcast i8** [[TMP110]] to float** +// CHECK2-NEXT: store float* [[VLA]], float** [[TMP111]], align 8 +// CHECK2-NEXT: [[TMP112:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 3 +// CHECK2-NEXT: [[TMP113:%.*]] = bitcast i8** [[TMP112]] to float** +// CHECK2-NEXT: store float* [[VLA]], float** [[TMP113]], align 8 +// CHECK2-NEXT: store i64 [[TMP92]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_sizes.10, i32 0, i32 3), align 8 +// CHECK2-NEXT: [[TMP114:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 3 +// CHECK2-NEXT: store i8* null, i8** [[TMP114]], align 8 +// CHECK2-NEXT: [[TMP115:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 4 +// CHECK2-NEXT: [[TMP116:%.*]] = bitcast i8** [[TMP115]] to [5 x [10 x double]]** +// CHECK2-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP116]], align 8 +// CHECK2-NEXT: [[TMP117:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 4 +// CHECK2-NEXT: [[TMP118:%.*]] = bitcast i8** [[TMP117]] to [5 x [10 x double]]** +// CHECK2-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP118]], align 8 +// CHECK2-NEXT: [[TMP119:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 4 +// CHECK2-NEXT: store i8* null, i8** [[TMP119]], align 8 +// CHECK2-NEXT: [[TMP120:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 5 +// CHECK2-NEXT: [[TMP121:%.*]] = bitcast i8** [[TMP120]] to i64* +// CHECK2-NEXT: store i64 5, i64* [[TMP121]], align 8 +// CHECK2-NEXT: [[TMP122:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 5 +// CHECK2-NEXT: [[TMP123:%.*]] = bitcast i8** [[TMP122]] to i64* +// CHECK2-NEXT: store i64 5, i64* [[TMP123]], align 8 +// CHECK2-NEXT: [[TMP124:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 5 // CHECK2-NEXT: store i8* null, i8** [[TMP124]], align 8 -// CHECK2-NEXT: [[TMP125:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 5 +// CHECK2-NEXT: [[TMP125:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 6 // CHECK2-NEXT: [[TMP126:%.*]] = bitcast i8** [[TMP125]] to i64* -// CHECK2-NEXT: store i64 5, i64* 
[[TMP126]], align 8 -// CHECK2-NEXT: [[TMP127:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 5 +// CHECK2-NEXT: store i64 [[TMP5]], i64* [[TMP126]], align 8 +// CHECK2-NEXT: [[TMP127:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 6 // CHECK2-NEXT: [[TMP128:%.*]] = bitcast i8** [[TMP127]] to i64* -// CHECK2-NEXT: store i64 5, i64* [[TMP128]], align 8 -// CHECK2-NEXT: [[TMP129:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5 -// CHECK2-NEXT: store i64 8, i64* [[TMP129]], align 8 -// CHECK2-NEXT: [[TMP130:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 5 -// CHECK2-NEXT: store i8* null, i8** [[TMP130]], align 8 -// CHECK2-NEXT: [[TMP131:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 6 -// CHECK2-NEXT: [[TMP132:%.*]] = bitcast i8** [[TMP131]] to i64* -// CHECK2-NEXT: store i64 [[TMP5]], i64* [[TMP132]], align 8 -// CHECK2-NEXT: [[TMP133:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 6 -// CHECK2-NEXT: [[TMP134:%.*]] = bitcast i8** [[TMP133]] to i64* -// CHECK2-NEXT: store i64 [[TMP5]], i64* [[TMP134]], align 8 -// CHECK2-NEXT: [[TMP135:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6 -// CHECK2-NEXT: store i64 8, i64* [[TMP135]], align 8 -// CHECK2-NEXT: [[TMP136:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 6 -// CHECK2-NEXT: store i8* null, i8** [[TMP136]], align 8 -// CHECK2-NEXT: [[TMP137:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 7 -// CHECK2-NEXT: [[TMP138:%.*]] = bitcast i8** [[TMP137]] to double** -// CHECK2-NEXT: store double* [[VLA1]], double** [[TMP138]], align 8 -// CHECK2-NEXT: [[TMP139:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 7 -// CHECK2-NEXT: [[TMP140:%.*]] = bitcast i8** [[TMP139]] to double** -// CHECK2-NEXT: store double* [[VLA1]], double** [[TMP140]], align 8 -// CHECK2-NEXT: [[TMP141:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7 -// CHECK2-NEXT: store i64 [[TMP94]], i64* [[TMP141]], align 8 -// CHECK2-NEXT: [[TMP142:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 7 -// CHECK2-NEXT: store i8* null, i8** [[TMP142]], align 8 -// CHECK2-NEXT: [[TMP143:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 8 -// CHECK2-NEXT: [[TMP144:%.*]] = bitcast i8** [[TMP143]] to %struct.TT** -// CHECK2-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP144]], align 8 -// CHECK2-NEXT: [[TMP145:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 8 -// CHECK2-NEXT: [[TMP146:%.*]] = bitcast i8** [[TMP145]] to %struct.TT** -// CHECK2-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP146]], align 8 -// CHECK2-NEXT: [[TMP147:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 8 -// CHECK2-NEXT: store i64 16, i64* [[TMP147]], align 8 -// CHECK2-NEXT: [[TMP148:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 8 -// CHECK2-NEXT: store i8* null, i8** [[TMP148]], align 8 -// CHECK2-NEXT: [[TMP149:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 0 -// CHECK2-NEXT: [[TMP150:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* 
[[DOTOFFLOAD_PTRS27]], i32 0, i32 0 -// CHECK2-NEXT: [[TMP151:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 +// CHECK2-NEXT: store i64 [[TMP5]], i64* [[TMP128]], align 8 +// CHECK2-NEXT: [[TMP129:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 6 +// CHECK2-NEXT: store i8* null, i8** [[TMP129]], align 8 +// CHECK2-NEXT: [[TMP130:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 7 +// CHECK2-NEXT: [[TMP131:%.*]] = bitcast i8** [[TMP130]] to double** +// CHECK2-NEXT: store double* [[VLA1]], double** [[TMP131]], align 8 +// CHECK2-NEXT: [[TMP132:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 7 +// CHECK2-NEXT: [[TMP133:%.*]] = bitcast i8** [[TMP132]] to double** +// CHECK2-NEXT: store double* [[VLA1]], double** [[TMP133]], align 8 +// CHECK2-NEXT: store i64 [[TMP94]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_sizes.10, i32 0, i32 7), align 8 +// CHECK2-NEXT: [[TMP134:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 7 +// CHECK2-NEXT: store i8* null, i8** [[TMP134]], align 8 +// CHECK2-NEXT: [[TMP135:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 8 +// CHECK2-NEXT: [[TMP136:%.*]] = bitcast i8** [[TMP135]] to %struct.TT** +// CHECK2-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP136]], align 8 +// CHECK2-NEXT: [[TMP137:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 8 +// CHECK2-NEXT: [[TMP138:%.*]] = bitcast i8** [[TMP137]] to %struct.TT** +// CHECK2-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP138]], align 8 +// CHECK2-NEXT: [[TMP139:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 8 +// CHECK2-NEXT: store i8* null, i8** [[TMP139]], align 8 +// CHECK2-NEXT: [[TMP140:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 0 +// CHECK2-NEXT: [[TMP141:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 0 // CHECK2-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK2-NEXT: [[TMP152:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142.region_id, i32 9, i8** [[TMP149]], i8** [[TMP150]], i64* [[TMP151]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) -// CHECK2-NEXT: [[TMP153:%.*]] = icmp ne i32 [[TMP152]], 0 -// CHECK2-NEXT: br i1 [[TMP153]], label [[OMP_OFFLOAD_FAILED30:%.*]], label [[OMP_OFFLOAD_CONT31:%.*]] +// CHECK2-NEXT: [[TMP142:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142.region_id, i32 9, i8** [[TMP140]], i8** [[TMP141]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_sizes.10, i32 0, i32 0), i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_maptypes.11, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK2-NEXT: [[TMP143:%.*]] = icmp ne i32 [[TMP142]], 0 +// CHECK2-NEXT: br i1 [[TMP143]], label [[OMP_OFFLOAD_FAILED30:%.*]], label [[OMP_OFFLOAD_CONT31:%.*]] // CHECK2: omp_offload.failed30: // CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142(i64 [[TMP90]], [10 x float]* [[B]], i64 [[TMP2]], float* 
[[VLA]], [5 x [10 x double]]* [[C]], i64 5, i64 [[TMP5]], double* [[VLA1]], %struct.TT* [[D]]) #[[ATTR4]] // CHECK2-NEXT: br label [[OMP_OFFLOAD_CONT31]] @@ -2348,10 +2301,10 @@ // CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142(i64 [[TMP90]], [10 x float]* [[B]], i64 [[TMP2]], float* [[VLA]], [5 x [10 x double]]* [[C]], i64 5, i64 [[TMP5]], double* [[VLA1]], %struct.TT* [[D]]) #[[ATTR4]] // CHECK2-NEXT: br label [[OMP_IF_END33]] // CHECK2: omp_if.end33: -// CHECK2-NEXT: [[TMP154:%.*]] = load i32, i32* [[A]], align 4 -// CHECK2-NEXT: [[TMP155:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK2-NEXT: call void @llvm.stackrestore(i8* [[TMP155]]) -// CHECK2-NEXT: ret i32 [[TMP154]] +// CHECK2-NEXT: [[TMP144:%.*]] = load i32, i32* [[A]], align 4 +// CHECK2-NEXT: [[TMP145:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK2-NEXT: call void @llvm.stackrestore(i8* [[TMP145]]) +// CHECK2-NEXT: ret i32 [[TMP144]] // // // CHECK2-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l97 @@ -3053,7 +3006,6 @@ // CHECK2-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 8 // CHECK2-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 8 // CHECK2-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 8 -// CHECK2-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 8 // CHECK2-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK2-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 // CHECK2-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 @@ -3085,57 +3037,47 @@ // CHECK2-NEXT: [[TMP12:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK2-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to double** // CHECK2-NEXT: store double* [[A]], double** [[TMP13]], align 8 -// CHECK2-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK2-NEXT: store i64 8, i64* [[TMP14]], align 8 -// CHECK2-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK2-NEXT: store i8* null, i8** [[TMP15]], align 8 -// CHECK2-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK2-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i64* -// CHECK2-NEXT: store i64 [[TMP6]], i64* [[TMP17]], align 8 -// CHECK2-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK2-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i64* -// CHECK2-NEXT: store i64 [[TMP6]], i64* [[TMP19]], align 8 -// CHECK2-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK2-NEXT: store i64 4, i64* [[TMP20]], align 8 -// CHECK2-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK2-NEXT: store i8* null, i8** [[TMP21]], align 8 -// CHECK2-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK2-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK2-NEXT: store i8* null, i8** [[TMP14]], align 8 +// CHECK2-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK2-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i64* +// CHECK2-NEXT: store i64 [[TMP6]], i64* [[TMP16]], align 8 +// CHECK2-NEXT: [[TMP17:%.*]] = getelementptr inbounds 
[5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK2-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i64* +// CHECK2-NEXT: store i64 [[TMP6]], i64* [[TMP18]], align 8 +// CHECK2-NEXT: [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK2-NEXT: store i8* null, i8** [[TMP19]], align 8 +// CHECK2-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK2-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i64* +// CHECK2-NEXT: store i64 2, i64* [[TMP21]], align 8 +// CHECK2-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK2-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i64* // CHECK2-NEXT: store i64 2, i64* [[TMP23]], align 8 -// CHECK2-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK2-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i64* -// CHECK2-NEXT: store i64 2, i64* [[TMP25]], align 8 -// CHECK2-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK2-NEXT: store i64 8, i64* [[TMP26]], align 8 -// CHECK2-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK2-NEXT: store i8* null, i8** [[TMP27]], align 8 -// CHECK2-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK2-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i64* -// CHECK2-NEXT: store i64 [[TMP2]], i64* [[TMP29]], align 8 -// CHECK2-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK2-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i64* -// CHECK2-NEXT: store i64 [[TMP2]], i64* [[TMP31]], align 8 -// CHECK2-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK2-NEXT: store i64 8, i64* [[TMP32]], align 8 -// CHECK2-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 -// CHECK2-NEXT: store i8* null, i8** [[TMP33]], align 8 -// CHECK2-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK2-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i16** -// CHECK2-NEXT: store i16* [[VLA]], i16** [[TMP35]], align 8 -// CHECK2-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK2-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i16** -// CHECK2-NEXT: store i16* [[VLA]], i16** [[TMP37]], align 8 -// CHECK2-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK2-NEXT: store i64 [[TMP9]], i64* [[TMP38]], align 8 -// CHECK2-NEXT: [[TMP39:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 -// CHECK2-NEXT: store i8* null, i8** [[TMP39]], align 8 -// CHECK2-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK2-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK2-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 +// CHECK2-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK2-NEXT: store i8* null, i8** [[TMP24]], align 8 +// 
CHECK2-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK2-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i64* +// CHECK2-NEXT: store i64 [[TMP2]], i64* [[TMP26]], align 8 +// CHECK2-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK2-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i64* +// CHECK2-NEXT: store i64 [[TMP2]], i64* [[TMP28]], align 8 +// CHECK2-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 +// CHECK2-NEXT: store i8* null, i8** [[TMP29]], align 8 +// CHECK2-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK2-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i16** +// CHECK2-NEXT: store i16* [[VLA]], i16** [[TMP31]], align 8 +// CHECK2-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK2-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i16** +// CHECK2-NEXT: store i16* [[VLA]], i16** [[TMP33]], align 8 +// CHECK2-NEXT: store i64 [[TMP9]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.13, i32 0, i32 4), align 8 +// CHECK2-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 +// CHECK2-NEXT: store i8* null, i8** [[TMP34]], align 8 +// CHECK2-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK2-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK2-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK2-NEXT: [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l215.region_id, i32 5, i8** [[TMP40]], i8** [[TMP41]], i64* [[TMP42]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) -// CHECK2-NEXT: [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0 -// CHECK2-NEXT: br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK2-NEXT: [[TMP37:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l215.region_id, i32 5, i8** [[TMP35]], i8** [[TMP36]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.13, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.14, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK2-NEXT: [[TMP38:%.*]] = icmp ne i32 [[TMP37]], 0 +// CHECK2-NEXT: br i1 [[TMP38]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK2: omp_offload.failed: // CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l215(%struct.S1* [[THIS1]], i64 [[TMP6]], i64 2, i64 [[TMP2]], i16* [[VLA]]) #[[ATTR4]] // CHECK2-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -3145,15 +3087,15 @@ // CHECK2-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l215(%struct.S1* [[THIS1]], i64 [[TMP6]], i64 2, i64 [[TMP2]], i16* [[VLA]]) #[[ATTR4]] // CHECK2-NEXT: br label [[OMP_IF_END]] // CHECK2: omp_if.end: -// CHECK2-NEXT: [[TMP45:%.*]] = mul nsw i64 1, [[TMP2]] -// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP45]] +// CHECK2-NEXT: [[TMP39:%.*]] = 
mul nsw i64 1, [[TMP2]] +// CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP39]] // CHECK2-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1 -// CHECK2-NEXT: [[TMP46:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2 -// CHECK2-NEXT: [[CONV3:%.*]] = sext i16 [[TMP46]] to i32 -// CHECK2-NEXT: [[TMP47:%.*]] = load i32, i32* [[B]], align 4 -// CHECK2-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], [[TMP47]] -// CHECK2-NEXT: [[TMP48:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK2-NEXT: call void @llvm.stackrestore(i8* [[TMP48]]) +// CHECK2-NEXT: [[TMP40:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2 +// CHECK2-NEXT: [[CONV3:%.*]] = sext i16 [[TMP40]] to i32 +// CHECK2-NEXT: [[TMP41:%.*]] = load i32, i32* [[B]], align 4 +// CHECK2-NEXT: [[ADD4:%.*]] = add nsw i32 [[CONV3]], [[TMP41]] +// CHECK2-NEXT: [[TMP42:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK2-NEXT: call void @llvm.stackrestore(i8* [[TMP42]]) // CHECK2-NEXT: ret i32 [[ADD4]] // // @@ -3258,7 +3200,7 @@ // CHECK2-NEXT: [[ADD8:%.*]] = add i32 [[TMP40]], 1 // CHECK2-NEXT: [[TMP41:%.*]] = zext i32 [[ADD8]] to i64 // CHECK2-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP41]]) -// CHECK2-NEXT: [[TMP42:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l197.region_id, i32 5, i8** [[TMP34]], i8** [[TMP35]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.14, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.15, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK2-NEXT: [[TMP42:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l197.region_id, i32 5, i8** [[TMP34]], i8** [[TMP35]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.16, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) // CHECK2-NEXT: [[TMP43:%.*]] = icmp ne i32 [[TMP42]], 0 // CHECK2-NEXT: br i1 [[TMP43]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK2: omp_offload.failed: @@ -3329,7 +3271,7 @@ // CHECK2-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK2-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK2-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK2-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l180.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.17, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.18, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK2-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l180.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.19, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.20, i32 0, i32 0), i8** null, i8** null, i32 0, i32 
1) // CHECK2-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0 // CHECK2-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK2: omp_offload.failed: @@ -3368,11 +3310,11 @@ // CHECK2-NEXT: [[CONV3:%.*]] = bitcast i64* [[B_CASTED]] to i32* // CHECK2-NEXT: store i32 [[TMP4]], i32* [[CONV3]], align 4 // CHECK2-NEXT: [[TMP5:%.*]] = load i64, i64* [[B_CASTED]], align 8 -// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], i16* [[TMP3]]) +// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*)* @.omp_outlined..12 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], i16* [[TMP3]]) // CHECK2-NEXT: ret void // // -// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..12 // CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i64 noundef [[B:%.*]], i64 noundef [[VLA:%.*]], i64 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR3]] { // CHECK2-NEXT: entry: // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -3506,11 +3448,11 @@ // CHECK2-NEXT: [[CONV7:%.*]] = bitcast i64* [[AAA_CASTED]] to i8* // CHECK2-NEXT: store i8 [[TMP7]], i8* [[CONV7]], align 1 // CHECK2-NEXT: [[TMP8:%.*]] = load i64, i64* [[AAA_CASTED]], align 8 -// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, [10 x i32]*)* @.omp_outlined..13 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], i64 [[TMP8]], [10 x i32]* [[TMP0]]) +// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, [10 x i32]*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], i64 [[TMP8]], [10 x i32]* [[TMP0]]) // CHECK2-NEXT: ret void // // -// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..13 +// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..15 // CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[N:%.*]], i64 noundef [[AA:%.*]], i64 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] { // CHECK2-NEXT: entry: // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -3672,11 +3614,11 @@ // CHECK2-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16* // CHECK2-NEXT: store i16 [[TMP3]], i16* [[CONV3]], align 2 // CHECK2-NEXT: [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8 -// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]]) +// CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]]) // CHECK2-NEXT: ret void // // -// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..16 +// CHECK2-LABEL: define {{[^@]+}}@.omp_outlined..18 // CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] { // CHECK2-NEXT: entry: // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -3808,7 +3750,6 @@ // CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS21:%.*]] = alloca [9 x i8*], align 4 // CHECK3-NEXT: [[DOTOFFLOAD_PTRS22:%.*]] = alloca [9 x i8*], align 4 // CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS23:%.*]] = alloca [9 x i8*], align 4 -// CHECK3-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [9 x i64], align 4 // CHECK3-NEXT: [[_TMP24:%.*]] = alloca i32, align 4 // CHECK3-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]]) // CHECK3-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 @@ -3982,97 +3923,80 @@ // CHECK3-NEXT: [[TMP97:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0 // CHECK3-NEXT: [[TMP98:%.*]] = bitcast i8** [[TMP97]] to i32* // CHECK3-NEXT: store i32 [[TMP88]], i32* [[TMP98]], align 4 -// CHECK3-NEXT: [[TMP99:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK3-NEXT: store i64 4, i64* [[TMP99]], align 4 -// CHECK3-NEXT: [[TMP100:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 0 -// CHECK3-NEXT: store i8* null, i8** [[TMP100]], align 4 -// CHECK3-NEXT: [[TMP101:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 1 -// CHECK3-NEXT: [[TMP102:%.*]] = bitcast i8** [[TMP101]] to [10 x float]** -// CHECK3-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP102]], align 4 -// CHECK3-NEXT: [[TMP103:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 1 -// CHECK3-NEXT: [[TMP104:%.*]] = bitcast i8** [[TMP103]] to [10 x float]** -// CHECK3-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP104]], align 4 -// CHECK3-NEXT: [[TMP105:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK3-NEXT: store i64 40, i64* [[TMP105]], align 4 -// CHECK3-NEXT: [[TMP106:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 1 -// CHECK3-NEXT: store i8* null, i8** [[TMP106]], align 4 -// CHECK3-NEXT: [[TMP107:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 2 +// CHECK3-NEXT: [[TMP99:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 0 +// CHECK3-NEXT: store i8* null, i8** [[TMP99]], align 4 +// CHECK3-NEXT: [[TMP100:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 1 +// CHECK3-NEXT: [[TMP101:%.*]] = bitcast i8** [[TMP100]] to [10 x float]** +// CHECK3-NEXT: store [10 x float]* [[B]], [10 x 
float]** [[TMP101]], align 4 +// CHECK3-NEXT: [[TMP102:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 1 +// CHECK3-NEXT: [[TMP103:%.*]] = bitcast i8** [[TMP102]] to [10 x float]** +// CHECK3-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP103]], align 4 +// CHECK3-NEXT: [[TMP104:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 1 +// CHECK3-NEXT: store i8* null, i8** [[TMP104]], align 4 +// CHECK3-NEXT: [[TMP105:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 2 +// CHECK3-NEXT: [[TMP106:%.*]] = bitcast i8** [[TMP105]] to i32* +// CHECK3-NEXT: store i32 [[TMP1]], i32* [[TMP106]], align 4 +// CHECK3-NEXT: [[TMP107:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 2 // CHECK3-NEXT: [[TMP108:%.*]] = bitcast i8** [[TMP107]] to i32* // CHECK3-NEXT: store i32 [[TMP1]], i32* [[TMP108]], align 4 -// CHECK3-NEXT: [[TMP109:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 2 -// CHECK3-NEXT: [[TMP110:%.*]] = bitcast i8** [[TMP109]] to i32* -// CHECK3-NEXT: store i32 [[TMP1]], i32* [[TMP110]], align 4 -// CHECK3-NEXT: [[TMP111:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK3-NEXT: store i64 4, i64* [[TMP111]], align 4 -// CHECK3-NEXT: [[TMP112:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 2 -// CHECK3-NEXT: store i8* null, i8** [[TMP112]], align 4 -// CHECK3-NEXT: [[TMP113:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 3 -// CHECK3-NEXT: [[TMP114:%.*]] = bitcast i8** [[TMP113]] to float** -// CHECK3-NEXT: store float* [[VLA]], float** [[TMP114]], align 4 -// CHECK3-NEXT: [[TMP115:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 3 -// CHECK3-NEXT: [[TMP116:%.*]] = bitcast i8** [[TMP115]] to float** -// CHECK3-NEXT: store float* [[VLA]], float** [[TMP116]], align 4 -// CHECK3-NEXT: [[TMP117:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK3-NEXT: store i64 [[TMP91]], i64* [[TMP117]], align 4 -// CHECK3-NEXT: [[TMP118:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 3 -// CHECK3-NEXT: store i8* null, i8** [[TMP118]], align 4 -// CHECK3-NEXT: [[TMP119:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 4 -// CHECK3-NEXT: [[TMP120:%.*]] = bitcast i8** [[TMP119]] to [5 x [10 x double]]** -// CHECK3-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP120]], align 4 -// CHECK3-NEXT: [[TMP121:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 4 -// CHECK3-NEXT: [[TMP122:%.*]] = bitcast i8** [[TMP121]] to [5 x [10 x double]]** -// CHECK3-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP122]], align 4 -// CHECK3-NEXT: [[TMP123:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK3-NEXT: store i64 400, i64* [[TMP123]], align 4 -// CHECK3-NEXT: [[TMP124:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 4 +// CHECK3-NEXT: [[TMP109:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 2 +// CHECK3-NEXT: store i8* null, i8** [[TMP109]], align 4 +// CHECK3-NEXT: [[TMP110:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* 
[[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 3 +// CHECK3-NEXT: [[TMP111:%.*]] = bitcast i8** [[TMP110]] to float** +// CHECK3-NEXT: store float* [[VLA]], float** [[TMP111]], align 4 +// CHECK3-NEXT: [[TMP112:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 3 +// CHECK3-NEXT: [[TMP113:%.*]] = bitcast i8** [[TMP112]] to float** +// CHECK3-NEXT: store float* [[VLA]], float** [[TMP113]], align 4 +// CHECK3-NEXT: store i64 [[TMP91]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_sizes.10, i32 0, i32 3), align 4 +// CHECK3-NEXT: [[TMP114:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 3 +// CHECK3-NEXT: store i8* null, i8** [[TMP114]], align 4 +// CHECK3-NEXT: [[TMP115:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 4 +// CHECK3-NEXT: [[TMP116:%.*]] = bitcast i8** [[TMP115]] to [5 x [10 x double]]** +// CHECK3-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP116]], align 4 +// CHECK3-NEXT: [[TMP117:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 4 +// CHECK3-NEXT: [[TMP118:%.*]] = bitcast i8** [[TMP117]] to [5 x [10 x double]]** +// CHECK3-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP118]], align 4 +// CHECK3-NEXT: [[TMP119:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 4 +// CHECK3-NEXT: store i8* null, i8** [[TMP119]], align 4 +// CHECK3-NEXT: [[TMP120:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 5 +// CHECK3-NEXT: [[TMP121:%.*]] = bitcast i8** [[TMP120]] to i32* +// CHECK3-NEXT: store i32 5, i32* [[TMP121]], align 4 +// CHECK3-NEXT: [[TMP122:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 5 +// CHECK3-NEXT: [[TMP123:%.*]] = bitcast i8** [[TMP122]] to i32* +// CHECK3-NEXT: store i32 5, i32* [[TMP123]], align 4 +// CHECK3-NEXT: [[TMP124:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 5 // CHECK3-NEXT: store i8* null, i8** [[TMP124]], align 4 -// CHECK3-NEXT: [[TMP125:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 5 +// CHECK3-NEXT: [[TMP125:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 6 // CHECK3-NEXT: [[TMP126:%.*]] = bitcast i8** [[TMP125]] to i32* -// CHECK3-NEXT: store i32 5, i32* [[TMP126]], align 4 -// CHECK3-NEXT: [[TMP127:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 5 +// CHECK3-NEXT: store i32 [[TMP3]], i32* [[TMP126]], align 4 +// CHECK3-NEXT: [[TMP127:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 6 // CHECK3-NEXT: [[TMP128:%.*]] = bitcast i8** [[TMP127]] to i32* -// CHECK3-NEXT: store i32 5, i32* [[TMP128]], align 4 -// CHECK3-NEXT: [[TMP129:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5 -// CHECK3-NEXT: store i64 4, i64* [[TMP129]], align 4 -// CHECK3-NEXT: [[TMP130:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 5 -// CHECK3-NEXT: store i8* null, i8** [[TMP130]], align 4 -// CHECK3-NEXT: [[TMP131:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 6 -// CHECK3-NEXT: [[TMP132:%.*]] = bitcast i8** [[TMP131]] to i32* -// CHECK3-NEXT: store i32 [[TMP3]], i32* [[TMP132]], align 4 -// CHECK3-NEXT: [[TMP133:%.*]] = 
getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 6 -// CHECK3-NEXT: [[TMP134:%.*]] = bitcast i8** [[TMP133]] to i32* -// CHECK3-NEXT: store i32 [[TMP3]], i32* [[TMP134]], align 4 -// CHECK3-NEXT: [[TMP135:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6 -// CHECK3-NEXT: store i64 4, i64* [[TMP135]], align 4 -// CHECK3-NEXT: [[TMP136:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 6 -// CHECK3-NEXT: store i8* null, i8** [[TMP136]], align 4 -// CHECK3-NEXT: [[TMP137:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 7 -// CHECK3-NEXT: [[TMP138:%.*]] = bitcast i8** [[TMP137]] to double** -// CHECK3-NEXT: store double* [[VLA1]], double** [[TMP138]], align 4 -// CHECK3-NEXT: [[TMP139:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 7 -// CHECK3-NEXT: [[TMP140:%.*]] = bitcast i8** [[TMP139]] to double** -// CHECK3-NEXT: store double* [[VLA1]], double** [[TMP140]], align 4 -// CHECK3-NEXT: [[TMP141:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7 -// CHECK3-NEXT: store i64 [[TMP94]], i64* [[TMP141]], align 4 -// CHECK3-NEXT: [[TMP142:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 7 -// CHECK3-NEXT: store i8* null, i8** [[TMP142]], align 4 -// CHECK3-NEXT: [[TMP143:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 8 -// CHECK3-NEXT: [[TMP144:%.*]] = bitcast i8** [[TMP143]] to %struct.TT** -// CHECK3-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP144]], align 4 -// CHECK3-NEXT: [[TMP145:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 8 -// CHECK3-NEXT: [[TMP146:%.*]] = bitcast i8** [[TMP145]] to %struct.TT** -// CHECK3-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP146]], align 4 -// CHECK3-NEXT: [[TMP147:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 8 -// CHECK3-NEXT: store i64 12, i64* [[TMP147]], align 4 -// CHECK3-NEXT: [[TMP148:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 8 -// CHECK3-NEXT: store i8* null, i8** [[TMP148]], align 4 -// CHECK3-NEXT: [[TMP149:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP150:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP151:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 +// CHECK3-NEXT: store i32 [[TMP3]], i32* [[TMP128]], align 4 +// CHECK3-NEXT: [[TMP129:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 6 +// CHECK3-NEXT: store i8* null, i8** [[TMP129]], align 4 +// CHECK3-NEXT: [[TMP130:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 7 +// CHECK3-NEXT: [[TMP131:%.*]] = bitcast i8** [[TMP130]] to double** +// CHECK3-NEXT: store double* [[VLA1]], double** [[TMP131]], align 4 +// CHECK3-NEXT: [[TMP132:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 7 +// CHECK3-NEXT: [[TMP133:%.*]] = bitcast i8** [[TMP132]] to double** +// CHECK3-NEXT: store double* [[VLA1]], double** [[TMP133]], align 4 +// CHECK3-NEXT: store i64 [[TMP94]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_sizes.10, i32 0, i32 7), align 4 +// CHECK3-NEXT: 
[[TMP134:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 7 +// CHECK3-NEXT: store i8* null, i8** [[TMP134]], align 4 +// CHECK3-NEXT: [[TMP135:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 8 +// CHECK3-NEXT: [[TMP136:%.*]] = bitcast i8** [[TMP135]] to %struct.TT** +// CHECK3-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP136]], align 4 +// CHECK3-NEXT: [[TMP137:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 8 +// CHECK3-NEXT: [[TMP138:%.*]] = bitcast i8** [[TMP137]] to %struct.TT** +// CHECK3-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP138]], align 4 +// CHECK3-NEXT: [[TMP139:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 8 +// CHECK3-NEXT: store i8* null, i8** [[TMP139]], align 4 +// CHECK3-NEXT: [[TMP140:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP141:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0 // CHECK3-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK3-NEXT: [[TMP152:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142.region_id, i32 9, i8** [[TMP149]], i8** [[TMP150]], i64* [[TMP151]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) -// CHECK3-NEXT: [[TMP153:%.*]] = icmp ne i32 [[TMP152]], 0 -// CHECK3-NEXT: br i1 [[TMP153]], label [[OMP_OFFLOAD_FAILED25:%.*]], label [[OMP_OFFLOAD_CONT26:%.*]] +// CHECK3-NEXT: [[TMP142:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142.region_id, i32 9, i8** [[TMP140]], i8** [[TMP141]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_sizes.10, i32 0, i32 0), i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_maptypes.11, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK3-NEXT: [[TMP143:%.*]] = icmp ne i32 [[TMP142]], 0 +// CHECK3-NEXT: br i1 [[TMP143]], label [[OMP_OFFLOAD_FAILED25:%.*]], label [[OMP_OFFLOAD_CONT26:%.*]] // CHECK3: omp_offload.failed25: // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142(i32 [[TMP88]], [10 x float]* [[B]], i32 [[TMP1]], float* [[VLA]], [5 x [10 x double]]* [[C]], i32 5, i32 [[TMP3]], double* [[VLA1]], %struct.TT* [[D]]) #[[ATTR4]] // CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT26]] @@ -4082,10 +4006,10 @@ // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142(i32 [[TMP88]], [10 x float]* [[B]], i32 [[TMP1]], float* [[VLA]], [5 x [10 x double]]* [[C]], i32 5, i32 [[TMP3]], double* [[VLA1]], %struct.TT* [[D]]) #[[ATTR4]] // CHECK3-NEXT: br label [[OMP_IF_END28]] // CHECK3: omp_if.end28: -// CHECK3-NEXT: [[TMP154:%.*]] = load i32, i32* [[A]], align 4 -// CHECK3-NEXT: [[TMP155:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK3-NEXT: call void @llvm.stackrestore(i8* [[TMP155]]) -// CHECK3-NEXT: ret i32 [[TMP154]] +// CHECK3-NEXT: [[TMP144:%.*]] = load i32, i32* [[A]], align 4 +// CHECK3-NEXT: [[TMP145:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK3-NEXT: call void @llvm.stackrestore(i8* [[TMP145]]) +// CHECK3-NEXT: ret i32 [[TMP144]] // // // CHECK3-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l97 
@@ -4774,7 +4698,6 @@ // CHECK3-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 4 // CHECK3-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 4 // CHECK3-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 4 -// CHECK3-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 4 // CHECK3-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK3-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 // CHECK3-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 @@ -4805,57 +4728,47 @@ // CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK3-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to double** // CHECK3-NEXT: store double* [[A]], double** [[TMP13]], align 4 -// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK3-NEXT: store i64 8, i64* [[TMP14]], align 4 -// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK3-NEXT: store i8* null, i8** [[TMP15]], align 4 -// CHECK3-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK3-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32* -// CHECK3-NEXT: store i32 [[TMP5]], i32* [[TMP17]], align 4 -// CHECK3-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK3-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32* -// CHECK3-NEXT: store i32 [[TMP5]], i32* [[TMP19]], align 4 -// CHECK3-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK3-NEXT: store i64 4, i64* [[TMP20]], align 4 -// CHECK3-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK3-NEXT: store i8* null, i8** [[TMP21]], align 4 -// CHECK3-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK3-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK3-NEXT: store i8* null, i8** [[TMP14]], align 4 +// CHECK3-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK3-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i32* +// CHECK3-NEXT: store i32 [[TMP5]], i32* [[TMP16]], align 4 +// CHECK3-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK3-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32* +// CHECK3-NEXT: store i32 [[TMP5]], i32* [[TMP18]], align 4 +// CHECK3-NEXT: [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK3-NEXT: store i8* null, i8** [[TMP19]], align 4 +// CHECK3-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK3-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32* +// CHECK3-NEXT: store i32 2, i32* [[TMP21]], align 4 +// CHECK3-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK3-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i32* // CHECK3-NEXT: store i32 2, i32* [[TMP23]], align 4 -// CHECK3-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK3-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i32* -// CHECK3-NEXT: store i32 2, i32* [[TMP25]], align 4 -// 
CHECK3-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK3-NEXT: store i64 4, i64* [[TMP26]], align 4 -// CHECK3-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK3-NEXT: store i8* null, i8** [[TMP27]], align 4 -// CHECK3-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK3-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i32* -// CHECK3-NEXT: store i32 [[TMP1]], i32* [[TMP29]], align 4 -// CHECK3-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK3-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i32* -// CHECK3-NEXT: store i32 [[TMP1]], i32* [[TMP31]], align 4 -// CHECK3-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK3-NEXT: store i64 4, i64* [[TMP32]], align 4 -// CHECK3-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 -// CHECK3-NEXT: store i8* null, i8** [[TMP33]], align 4 -// CHECK3-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK3-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i16** -// CHECK3-NEXT: store i16* [[VLA]], i16** [[TMP35]], align 4 -// CHECK3-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK3-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i16** -// CHECK3-NEXT: store i16* [[VLA]], i16** [[TMP37]], align 4 -// CHECK3-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK3-NEXT: store i64 [[TMP9]], i64* [[TMP38]], align 4 -// CHECK3-NEXT: [[TMP39:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 -// CHECK3-NEXT: store i8* null, i8** [[TMP39]], align 4 -// CHECK3-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK3-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK3-NEXT: store i8* null, i8** [[TMP24]], align 4 +// CHECK3-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK3-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i32* +// CHECK3-NEXT: store i32 [[TMP1]], i32* [[TMP26]], align 4 +// CHECK3-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK3-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i32* +// CHECK3-NEXT: store i32 [[TMP1]], i32* [[TMP28]], align 4 +// CHECK3-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 +// CHECK3-NEXT: store i8* null, i8** [[TMP29]], align 4 +// CHECK3-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK3-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i16** +// CHECK3-NEXT: store i16* [[VLA]], i16** [[TMP31]], align 4 +// CHECK3-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK3-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] 
to i16** +// CHECK3-NEXT: store i16* [[VLA]], i16** [[TMP33]], align 4 +// CHECK3-NEXT: store i64 [[TMP9]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.13, i32 0, i32 4), align 4 +// CHECK3-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 +// CHECK3-NEXT: store i8* null, i8** [[TMP34]], align 4 +// CHECK3-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK3-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK3-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK3-NEXT: [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l215.region_id, i32 5, i8** [[TMP40]], i8** [[TMP41]], i64* [[TMP42]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) -// CHECK3-NEXT: [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0 -// CHECK3-NEXT: br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK3-NEXT: [[TMP37:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l215.region_id, i32 5, i8** [[TMP35]], i8** [[TMP36]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.13, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.14, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK3-NEXT: [[TMP38:%.*]] = icmp ne i32 [[TMP37]], 0 +// CHECK3-NEXT: br i1 [[TMP38]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK3: omp_offload.failed: // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l215(%struct.S1* [[THIS1]], i32 [[TMP5]], i32 2, i32 [[TMP1]], i16* [[VLA]]) #[[ATTR4]] // CHECK3-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -4865,15 +4778,15 @@ // CHECK3-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l215(%struct.S1* [[THIS1]], i32 [[TMP5]], i32 2, i32 [[TMP1]], i16* [[VLA]]) #[[ATTR4]] // CHECK3-NEXT: br label [[OMP_IF_END]] // CHECK3: omp_if.end: -// CHECK3-NEXT: [[TMP45:%.*]] = mul nsw i32 1, [[TMP1]] -// CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP45]] +// CHECK3-NEXT: [[TMP39:%.*]] = mul nsw i32 1, [[TMP1]] +// CHECK3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP39]] // CHECK3-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1 -// CHECK3-NEXT: [[TMP46:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2 -// CHECK3-NEXT: [[CONV:%.*]] = sext i16 [[TMP46]] to i32 -// CHECK3-NEXT: [[TMP47:%.*]] = load i32, i32* [[B]], align 4 -// CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[TMP47]] -// CHECK3-NEXT: [[TMP48:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK3-NEXT: call void @llvm.stackrestore(i8* [[TMP48]]) +// CHECK3-NEXT: [[TMP40:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2 +// CHECK3-NEXT: [[CONV:%.*]] = sext i16 [[TMP40]] to i32 +// CHECK3-NEXT: [[TMP41:%.*]] = load i32, i32* [[B]], align 4 +// CHECK3-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[TMP41]] +// CHECK3-NEXT: [[TMP42:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK3-NEXT: call void @llvm.stackrestore(i8* [[TMP42]]) // CHECK3-NEXT: ret i32 [[ADD3]] // // @@ -4976,7 +4889,7 @@ // 
CHECK3-NEXT: [[ADD6:%.*]] = add i32 [[TMP40]], 1 // CHECK3-NEXT: [[TMP41:%.*]] = zext i32 [[ADD6]] to i64 // CHECK3-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP41]]) -// CHECK3-NEXT: [[TMP42:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l197.region_id, i32 5, i8** [[TMP34]], i8** [[TMP35]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.14, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.15, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK3-NEXT: [[TMP42:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l197.region_id, i32 5, i8** [[TMP34]], i8** [[TMP35]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.16, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) // CHECK3-NEXT: [[TMP43:%.*]] = icmp ne i32 [[TMP42]], 0 // CHECK3-NEXT: br i1 [[TMP43]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK3: omp_offload.failed: @@ -5046,7 +4959,7 @@ // CHECK3-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK3-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK3-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK3-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l180.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.17, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.18, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK3-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l180.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.19, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.20, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) // CHECK3-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0 // CHECK3-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK3: omp_offload.failed: @@ -5083,11 +4996,11 @@ // CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[B_ADDR]], align 4 // CHECK3-NEXT: store i32 [[TMP4]], i32* [[B_CASTED]], align 4 // CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[B_CASTED]], align 4 -// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], i16* [[TMP3]]) +// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*)* @.omp_outlined..12 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], i16* [[TMP3]]) // CHECK3-NEXT: ret void // // -// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..12 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i32 noundef [[B:%.*]], i32 noundef [[VLA:%.*]], i32 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR3]] { // CHECK3-NEXT: entry: // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -5216,11 +5129,11 @@ // CHECK3-NEXT: [[CONV3:%.*]] = bitcast i32* [[AAA_CASTED]] to i8* // CHECK3-NEXT: store i8 [[TMP7]], i8* [[CONV3]], align 1 // CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[AAA_CASTED]], align 4 -// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, [10 x i32]*)* @.omp_outlined..13 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], i32 [[TMP8]], [10 x i32]* [[TMP0]]) +// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, [10 x i32]*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], i32 [[TMP8]], [10 x i32]* [[TMP0]]) // CHECK3-NEXT: ret void // // -// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..13 +// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..15 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[N:%.*]], i32 noundef [[AA:%.*]], i32 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] { // CHECK3-NEXT: entry: // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -5378,11 +5291,11 @@ // CHECK3-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16* // CHECK3-NEXT: store i16 [[TMP3]], i16* [[CONV1]], align 2 // CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4 -// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]]) +// CHECK3-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]]) // CHECK3-NEXT: ret void // // -// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..16 +// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined..18 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] { // CHECK3-NEXT: entry: // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -5513,7 +5426,6 @@ // CHECK4-NEXT: [[DOTOFFLOAD_BASEPTRS21:%.*]] = alloca [9 x i8*], align 4 // CHECK4-NEXT: [[DOTOFFLOAD_PTRS22:%.*]] = alloca [9 x i8*], align 4 // CHECK4-NEXT: [[DOTOFFLOAD_MAPPERS23:%.*]] = alloca [9 x i8*], align 4 -// CHECK4-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [9 x i64], align 4 // CHECK4-NEXT: [[_TMP24:%.*]] = alloca i32, align 4 // CHECK4-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]]) // CHECK4-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 @@ -5687,97 +5599,80 @@ // CHECK4-NEXT: [[TMP97:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0 // CHECK4-NEXT: [[TMP98:%.*]] = bitcast i8** [[TMP97]] to i32* // CHECK4-NEXT: store i32 [[TMP88]], i32* [[TMP98]], align 4 -// CHECK4-NEXT: [[TMP99:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK4-NEXT: store i64 4, i64* [[TMP99]], align 4 -// CHECK4-NEXT: [[TMP100:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 0 -// CHECK4-NEXT: store i8* null, i8** [[TMP100]], align 4 -// CHECK4-NEXT: [[TMP101:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 1 -// CHECK4-NEXT: [[TMP102:%.*]] = bitcast i8** [[TMP101]] to [10 x float]** -// CHECK4-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP102]], align 4 -// CHECK4-NEXT: [[TMP103:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 1 -// CHECK4-NEXT: [[TMP104:%.*]] = bitcast i8** [[TMP103]] to [10 x float]** -// CHECK4-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP104]], align 4 -// CHECK4-NEXT: [[TMP105:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK4-NEXT: store i64 40, i64* [[TMP105]], align 4 -// CHECK4-NEXT: [[TMP106:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 1 -// CHECK4-NEXT: store i8* null, i8** [[TMP106]], align 4 -// CHECK4-NEXT: [[TMP107:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 2 +// CHECK4-NEXT: [[TMP99:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 0 +// CHECK4-NEXT: store i8* null, i8** [[TMP99]], align 4 +// CHECK4-NEXT: [[TMP100:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 1 +// CHECK4-NEXT: [[TMP101:%.*]] = bitcast i8** [[TMP100]] to [10 x float]** +// CHECK4-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP101]], align 4 +// CHECK4-NEXT: [[TMP102:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 1 +// CHECK4-NEXT: [[TMP103:%.*]] = bitcast i8** [[TMP102]] to [10 x float]** +// CHECK4-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP103]], align 4 +// 
CHECK4-NEXT: [[TMP104:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 1 +// CHECK4-NEXT: store i8* null, i8** [[TMP104]], align 4 +// CHECK4-NEXT: [[TMP105:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 2 +// CHECK4-NEXT: [[TMP106:%.*]] = bitcast i8** [[TMP105]] to i32* +// CHECK4-NEXT: store i32 [[TMP1]], i32* [[TMP106]], align 4 +// CHECK4-NEXT: [[TMP107:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 2 // CHECK4-NEXT: [[TMP108:%.*]] = bitcast i8** [[TMP107]] to i32* // CHECK4-NEXT: store i32 [[TMP1]], i32* [[TMP108]], align 4 -// CHECK4-NEXT: [[TMP109:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 2 -// CHECK4-NEXT: [[TMP110:%.*]] = bitcast i8** [[TMP109]] to i32* -// CHECK4-NEXT: store i32 [[TMP1]], i32* [[TMP110]], align 4 -// CHECK4-NEXT: [[TMP111:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK4-NEXT: store i64 4, i64* [[TMP111]], align 4 -// CHECK4-NEXT: [[TMP112:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 2 -// CHECK4-NEXT: store i8* null, i8** [[TMP112]], align 4 -// CHECK4-NEXT: [[TMP113:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 3 -// CHECK4-NEXT: [[TMP114:%.*]] = bitcast i8** [[TMP113]] to float** -// CHECK4-NEXT: store float* [[VLA]], float** [[TMP114]], align 4 -// CHECK4-NEXT: [[TMP115:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 3 -// CHECK4-NEXT: [[TMP116:%.*]] = bitcast i8** [[TMP115]] to float** -// CHECK4-NEXT: store float* [[VLA]], float** [[TMP116]], align 4 -// CHECK4-NEXT: [[TMP117:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK4-NEXT: store i64 [[TMP91]], i64* [[TMP117]], align 4 -// CHECK4-NEXT: [[TMP118:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 3 -// CHECK4-NEXT: store i8* null, i8** [[TMP118]], align 4 -// CHECK4-NEXT: [[TMP119:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 4 -// CHECK4-NEXT: [[TMP120:%.*]] = bitcast i8** [[TMP119]] to [5 x [10 x double]]** -// CHECK4-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP120]], align 4 -// CHECK4-NEXT: [[TMP121:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 4 -// CHECK4-NEXT: [[TMP122:%.*]] = bitcast i8** [[TMP121]] to [5 x [10 x double]]** -// CHECK4-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP122]], align 4 -// CHECK4-NEXT: [[TMP123:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK4-NEXT: store i64 400, i64* [[TMP123]], align 4 -// CHECK4-NEXT: [[TMP124:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 4 +// CHECK4-NEXT: [[TMP109:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 2 +// CHECK4-NEXT: store i8* null, i8** [[TMP109]], align 4 +// CHECK4-NEXT: [[TMP110:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 3 +// CHECK4-NEXT: [[TMP111:%.*]] = bitcast i8** [[TMP110]] to float** +// CHECK4-NEXT: store float* [[VLA]], float** [[TMP111]], align 4 +// CHECK4-NEXT: [[TMP112:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 3 +// CHECK4-NEXT: 
[[TMP113:%.*]] = bitcast i8** [[TMP112]] to float** +// CHECK4-NEXT: store float* [[VLA]], float** [[TMP113]], align 4 +// CHECK4-NEXT: store i64 [[TMP91]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_sizes.10, i32 0, i32 3), align 4 +// CHECK4-NEXT: [[TMP114:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 3 +// CHECK4-NEXT: store i8* null, i8** [[TMP114]], align 4 +// CHECK4-NEXT: [[TMP115:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 4 +// CHECK4-NEXT: [[TMP116:%.*]] = bitcast i8** [[TMP115]] to [5 x [10 x double]]** +// CHECK4-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP116]], align 4 +// CHECK4-NEXT: [[TMP117:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 4 +// CHECK4-NEXT: [[TMP118:%.*]] = bitcast i8** [[TMP117]] to [5 x [10 x double]]** +// CHECK4-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP118]], align 4 +// CHECK4-NEXT: [[TMP119:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 4 +// CHECK4-NEXT: store i8* null, i8** [[TMP119]], align 4 +// CHECK4-NEXT: [[TMP120:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 5 +// CHECK4-NEXT: [[TMP121:%.*]] = bitcast i8** [[TMP120]] to i32* +// CHECK4-NEXT: store i32 5, i32* [[TMP121]], align 4 +// CHECK4-NEXT: [[TMP122:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 5 +// CHECK4-NEXT: [[TMP123:%.*]] = bitcast i8** [[TMP122]] to i32* +// CHECK4-NEXT: store i32 5, i32* [[TMP123]], align 4 +// CHECK4-NEXT: [[TMP124:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 5 // CHECK4-NEXT: store i8* null, i8** [[TMP124]], align 4 -// CHECK4-NEXT: [[TMP125:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 5 +// CHECK4-NEXT: [[TMP125:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 6 // CHECK4-NEXT: [[TMP126:%.*]] = bitcast i8** [[TMP125]] to i32* -// CHECK4-NEXT: store i32 5, i32* [[TMP126]], align 4 -// CHECK4-NEXT: [[TMP127:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 5 +// CHECK4-NEXT: store i32 [[TMP3]], i32* [[TMP126]], align 4 +// CHECK4-NEXT: [[TMP127:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 6 // CHECK4-NEXT: [[TMP128:%.*]] = bitcast i8** [[TMP127]] to i32* -// CHECK4-NEXT: store i32 5, i32* [[TMP128]], align 4 -// CHECK4-NEXT: [[TMP129:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5 -// CHECK4-NEXT: store i64 4, i64* [[TMP129]], align 4 -// CHECK4-NEXT: [[TMP130:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 5 -// CHECK4-NEXT: store i8* null, i8** [[TMP130]], align 4 -// CHECK4-NEXT: [[TMP131:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 6 -// CHECK4-NEXT: [[TMP132:%.*]] = bitcast i8** [[TMP131]] to i32* -// CHECK4-NEXT: store i32 [[TMP3]], i32* [[TMP132]], align 4 -// CHECK4-NEXT: [[TMP133:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 6 -// CHECK4-NEXT: [[TMP134:%.*]] = bitcast i8** [[TMP133]] to i32* -// CHECK4-NEXT: store i32 [[TMP3]], i32* [[TMP134]], align 4 -// CHECK4-NEXT: [[TMP135:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, 
i32 6 -// CHECK4-NEXT: store i64 4, i64* [[TMP135]], align 4 -// CHECK4-NEXT: [[TMP136:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 6 -// CHECK4-NEXT: store i8* null, i8** [[TMP136]], align 4 -// CHECK4-NEXT: [[TMP137:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 7 -// CHECK4-NEXT: [[TMP138:%.*]] = bitcast i8** [[TMP137]] to double** -// CHECK4-NEXT: store double* [[VLA1]], double** [[TMP138]], align 4 -// CHECK4-NEXT: [[TMP139:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 7 -// CHECK4-NEXT: [[TMP140:%.*]] = bitcast i8** [[TMP139]] to double** -// CHECK4-NEXT: store double* [[VLA1]], double** [[TMP140]], align 4 -// CHECK4-NEXT: [[TMP141:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7 -// CHECK4-NEXT: store i64 [[TMP94]], i64* [[TMP141]], align 4 -// CHECK4-NEXT: [[TMP142:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 7 -// CHECK4-NEXT: store i8* null, i8** [[TMP142]], align 4 -// CHECK4-NEXT: [[TMP143:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 8 -// CHECK4-NEXT: [[TMP144:%.*]] = bitcast i8** [[TMP143]] to %struct.TT** -// CHECK4-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP144]], align 4 -// CHECK4-NEXT: [[TMP145:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 8 -// CHECK4-NEXT: [[TMP146:%.*]] = bitcast i8** [[TMP145]] to %struct.TT** -// CHECK4-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP146]], align 4 -// CHECK4-NEXT: [[TMP147:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 8 -// CHECK4-NEXT: store i64 12, i64* [[TMP147]], align 4 -// CHECK4-NEXT: [[TMP148:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 8 -// CHECK4-NEXT: store i8* null, i8** [[TMP148]], align 4 -// CHECK4-NEXT: [[TMP149:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0 -// CHECK4-NEXT: [[TMP150:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0 -// CHECK4-NEXT: [[TMP151:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 +// CHECK4-NEXT: store i32 [[TMP3]], i32* [[TMP128]], align 4 +// CHECK4-NEXT: [[TMP129:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 6 +// CHECK4-NEXT: store i8* null, i8** [[TMP129]], align 4 +// CHECK4-NEXT: [[TMP130:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 7 +// CHECK4-NEXT: [[TMP131:%.*]] = bitcast i8** [[TMP130]] to double** +// CHECK4-NEXT: store double* [[VLA1]], double** [[TMP131]], align 4 +// CHECK4-NEXT: [[TMP132:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 7 +// CHECK4-NEXT: [[TMP133:%.*]] = bitcast i8** [[TMP132]] to double** +// CHECK4-NEXT: store double* [[VLA1]], double** [[TMP133]], align 4 +// CHECK4-NEXT: store i64 [[TMP94]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_sizes.10, i32 0, i32 7), align 4 +// CHECK4-NEXT: [[TMP134:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 7 +// CHECK4-NEXT: store i8* null, i8** [[TMP134]], align 4 +// CHECK4-NEXT: [[TMP135:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 8 +// CHECK4-NEXT: [[TMP136:%.*]] = bitcast 
i8** [[TMP135]] to %struct.TT** +// CHECK4-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP136]], align 4 +// CHECK4-NEXT: [[TMP137:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 8 +// CHECK4-NEXT: [[TMP138:%.*]] = bitcast i8** [[TMP137]] to %struct.TT** +// CHECK4-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP138]], align 4 +// CHECK4-NEXT: [[TMP139:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 8 +// CHECK4-NEXT: store i8* null, i8** [[TMP139]], align 4 +// CHECK4-NEXT: [[TMP140:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0 +// CHECK4-NEXT: [[TMP141:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0 // CHECK4-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK4-NEXT: [[TMP152:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142.region_id, i32 9, i8** [[TMP149]], i8** [[TMP150]], i64* [[TMP151]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) -// CHECK4-NEXT: [[TMP153:%.*]] = icmp ne i32 [[TMP152]], 0 -// CHECK4-NEXT: br i1 [[TMP153]], label [[OMP_OFFLOAD_FAILED25:%.*]], label [[OMP_OFFLOAD_CONT26:%.*]] +// CHECK4-NEXT: [[TMP142:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142.region_id, i32 9, i8** [[TMP140]], i8** [[TMP141]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_sizes.10, i32 0, i32 0), i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_maptypes.11, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK4-NEXT: [[TMP143:%.*]] = icmp ne i32 [[TMP142]], 0 +// CHECK4-NEXT: br i1 [[TMP143]], label [[OMP_OFFLOAD_FAILED25:%.*]], label [[OMP_OFFLOAD_CONT26:%.*]] // CHECK4: omp_offload.failed25: // CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142(i32 [[TMP88]], [10 x float]* [[B]], i32 [[TMP1]], float* [[VLA]], [5 x [10 x double]]* [[C]], i32 5, i32 [[TMP3]], double* [[VLA1]], %struct.TT* [[D]]) #[[ATTR4]] // CHECK4-NEXT: br label [[OMP_OFFLOAD_CONT26]] @@ -5787,10 +5682,10 @@ // CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142(i32 [[TMP88]], [10 x float]* [[B]], i32 [[TMP1]], float* [[VLA]], [5 x [10 x double]]* [[C]], i32 5, i32 [[TMP3]], double* [[VLA1]], %struct.TT* [[D]]) #[[ATTR4]] // CHECK4-NEXT: br label [[OMP_IF_END28]] // CHECK4: omp_if.end28: -// CHECK4-NEXT: [[TMP154:%.*]] = load i32, i32* [[A]], align 4 -// CHECK4-NEXT: [[TMP155:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK4-NEXT: call void @llvm.stackrestore(i8* [[TMP155]]) -// CHECK4-NEXT: ret i32 [[TMP154]] +// CHECK4-NEXT: [[TMP144:%.*]] = load i32, i32* [[A]], align 4 +// CHECK4-NEXT: [[TMP145:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK4-NEXT: call void @llvm.stackrestore(i8* [[TMP145]]) +// CHECK4-NEXT: ret i32 [[TMP144]] // // // CHECK4-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l97 @@ -6479,7 +6374,6 @@ // CHECK4-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 4 // CHECK4-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 4 // CHECK4-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 4 -// CHECK4-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 4 // CHECK4-NEXT: 
[[TMP:%.*]] = alloca i32, align 4 // CHECK4-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 // CHECK4-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 @@ -6510,57 +6404,47 @@ // CHECK4-NEXT: [[TMP12:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK4-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to double** // CHECK4-NEXT: store double* [[A]], double** [[TMP13]], align 4 -// CHECK4-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK4-NEXT: store i64 8, i64* [[TMP14]], align 4 -// CHECK4-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK4-NEXT: store i8* null, i8** [[TMP15]], align 4 -// CHECK4-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK4-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32* -// CHECK4-NEXT: store i32 [[TMP5]], i32* [[TMP17]], align 4 -// CHECK4-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK4-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32* -// CHECK4-NEXT: store i32 [[TMP5]], i32* [[TMP19]], align 4 -// CHECK4-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK4-NEXT: store i64 4, i64* [[TMP20]], align 4 -// CHECK4-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK4-NEXT: store i8* null, i8** [[TMP21]], align 4 -// CHECK4-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK4-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK4-NEXT: store i8* null, i8** [[TMP14]], align 4 +// CHECK4-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK4-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i32* +// CHECK4-NEXT: store i32 [[TMP5]], i32* [[TMP16]], align 4 +// CHECK4-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK4-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32* +// CHECK4-NEXT: store i32 [[TMP5]], i32* [[TMP18]], align 4 +// CHECK4-NEXT: [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK4-NEXT: store i8* null, i8** [[TMP19]], align 4 +// CHECK4-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK4-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32* +// CHECK4-NEXT: store i32 2, i32* [[TMP21]], align 4 +// CHECK4-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK4-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i32* // CHECK4-NEXT: store i32 2, i32* [[TMP23]], align 4 -// CHECK4-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK4-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i32* -// CHECK4-NEXT: store i32 2, i32* [[TMP25]], align 4 -// CHECK4-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK4-NEXT: store i64 4, i64* [[TMP26]], align 4 -// CHECK4-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK4-NEXT: store i8* null, i8** 
[[TMP27]], align 4 -// CHECK4-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK4-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i32* -// CHECK4-NEXT: store i32 [[TMP1]], i32* [[TMP29]], align 4 -// CHECK4-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK4-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i32* -// CHECK4-NEXT: store i32 [[TMP1]], i32* [[TMP31]], align 4 -// CHECK4-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK4-NEXT: store i64 4, i64* [[TMP32]], align 4 -// CHECK4-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 -// CHECK4-NEXT: store i8* null, i8** [[TMP33]], align 4 -// CHECK4-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK4-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i16** -// CHECK4-NEXT: store i16* [[VLA]], i16** [[TMP35]], align 4 -// CHECK4-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK4-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i16** -// CHECK4-NEXT: store i16* [[VLA]], i16** [[TMP37]], align 4 -// CHECK4-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK4-NEXT: store i64 [[TMP9]], i64* [[TMP38]], align 4 -// CHECK4-NEXT: [[TMP39:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 -// CHECK4-NEXT: store i8* null, i8** [[TMP39]], align 4 -// CHECK4-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK4-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK4-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 +// CHECK4-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK4-NEXT: store i8* null, i8** [[TMP24]], align 4 +// CHECK4-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK4-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i32* +// CHECK4-NEXT: store i32 [[TMP1]], i32* [[TMP26]], align 4 +// CHECK4-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK4-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i32* +// CHECK4-NEXT: store i32 [[TMP1]], i32* [[TMP28]], align 4 +// CHECK4-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 +// CHECK4-NEXT: store i8* null, i8** [[TMP29]], align 4 +// CHECK4-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK4-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i16** +// CHECK4-NEXT: store i16* [[VLA]], i16** [[TMP31]], align 4 +// CHECK4-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK4-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i16** +// CHECK4-NEXT: store i16* [[VLA]], i16** [[TMP33]], align 4 +// CHECK4-NEXT: store i64 [[TMP9]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.13, i32 0, i32 4), align 4 +// CHECK4-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 
+// CHECK4-NEXT: store i8* null, i8** [[TMP34]], align 4 +// CHECK4-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK4-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK4-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK4-NEXT: [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l215.region_id, i32 5, i8** [[TMP40]], i8** [[TMP41]], i64* [[TMP42]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) -// CHECK4-NEXT: [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0 -// CHECK4-NEXT: br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK4-NEXT: [[TMP37:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l215.region_id, i32 5, i8** [[TMP35]], i8** [[TMP36]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.13, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.14, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK4-NEXT: [[TMP38:%.*]] = icmp ne i32 [[TMP37]], 0 +// CHECK4-NEXT: br i1 [[TMP38]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK4: omp_offload.failed: // CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l215(%struct.S1* [[THIS1]], i32 [[TMP5]], i32 2, i32 [[TMP1]], i16* [[VLA]]) #[[ATTR4]] // CHECK4-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -6570,15 +6454,15 @@ // CHECK4-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l215(%struct.S1* [[THIS1]], i32 [[TMP5]], i32 2, i32 [[TMP1]], i16* [[VLA]]) #[[ATTR4]] // CHECK4-NEXT: br label [[OMP_IF_END]] // CHECK4: omp_if.end: -// CHECK4-NEXT: [[TMP45:%.*]] = mul nsw i32 1, [[TMP1]] -// CHECK4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP45]] +// CHECK4-NEXT: [[TMP39:%.*]] = mul nsw i32 1, [[TMP1]] +// CHECK4-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP39]] // CHECK4-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1 -// CHECK4-NEXT: [[TMP46:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2 -// CHECK4-NEXT: [[CONV:%.*]] = sext i16 [[TMP46]] to i32 -// CHECK4-NEXT: [[TMP47:%.*]] = load i32, i32* [[B]], align 4 -// CHECK4-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[TMP47]] -// CHECK4-NEXT: [[TMP48:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK4-NEXT: call void @llvm.stackrestore(i8* [[TMP48]]) +// CHECK4-NEXT: [[TMP40:%.*]] = load i16, i16* [[ARRAYIDX2]], align 2 +// CHECK4-NEXT: [[CONV:%.*]] = sext i16 [[TMP40]] to i32 +// CHECK4-NEXT: [[TMP41:%.*]] = load i32, i32* [[B]], align 4 +// CHECK4-NEXT: [[ADD3:%.*]] = add nsw i32 [[CONV]], [[TMP41]] +// CHECK4-NEXT: [[TMP42:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK4-NEXT: call void @llvm.stackrestore(i8* [[TMP42]]) // CHECK4-NEXT: ret i32 [[ADD3]] // // @@ -6681,7 +6565,7 @@ // CHECK4-NEXT: [[ADD6:%.*]] = add i32 [[TMP40]], 1 // CHECK4-NEXT: [[TMP41:%.*]] = zext i32 [[ADD6]] to i64 // CHECK4-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP41]]) -// CHECK4-NEXT: [[TMP42:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 
-1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l197.region_id, i32 5, i8** [[TMP34]], i8** [[TMP35]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.14, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.15, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK4-NEXT: [[TMP42:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l197.region_id, i32 5, i8** [[TMP34]], i8** [[TMP35]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.16, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) // CHECK4-NEXT: [[TMP43:%.*]] = icmp ne i32 [[TMP42]], 0 // CHECK4-NEXT: br i1 [[TMP43]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK4: omp_offload.failed: @@ -6751,7 +6635,7 @@ // CHECK4-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK4-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK4-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK4-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l180.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.17, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.18, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK4-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l180.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.19, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.20, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) // CHECK4-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0 // CHECK4-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK4: omp_offload.failed: @@ -6788,11 +6672,11 @@ // CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[B_ADDR]], align 4 // CHECK4-NEXT: store i32 [[TMP4]], i32* [[B_CASTED]], align 4 // CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[B_CASTED]], align 4 -// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], i16* [[TMP3]]) +// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*)* @.omp_outlined..12 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], i16* [[TMP3]]) // CHECK4-NEXT: ret void // // -// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..12 // CHECK4-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i32 noundef [[B:%.*]], i32 noundef [[VLA:%.*]], i32 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]]) #[[ATTR3]] { // CHECK4-NEXT: entry: // CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -6921,11 +6805,11 @@ // CHECK4-NEXT: [[CONV3:%.*]] = bitcast i32* [[AAA_CASTED]] to i8* // CHECK4-NEXT: store i8 [[TMP7]], i8* [[CONV3]], align 1 // CHECK4-NEXT: [[TMP8:%.*]] = load i32, i32* [[AAA_CASTED]], align 4 -// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, [10 x i32]*)* @.omp_outlined..13 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], i32 [[TMP8]], [10 x i32]* [[TMP0]]) +// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, [10 x i32]*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], i32 [[TMP8]], [10 x i32]* [[TMP0]]) // CHECK4-NEXT: ret void // // -// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..13 +// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..15 // CHECK4-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[N:%.*]], i32 noundef [[AA:%.*]], i32 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] { // CHECK4-NEXT: entry: // CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -7083,11 +6967,11 @@ // CHECK4-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16* // CHECK4-NEXT: store i16 [[TMP3]], i16* [[CONV1]], align 2 // CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4 -// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]]) +// CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]]) // CHECK4-NEXT: ret void // // -// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..16 +// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined..18 // CHECK4-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] { // CHECK4-NEXT: entry: // CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -7218,7 +7102,6 @@ // CHECK5-NEXT: [[DOTOFFLOAD_BASEPTRS26:%.*]] = alloca [9 x i8*], align 8 // CHECK5-NEXT: [[DOTOFFLOAD_PTRS27:%.*]] = alloca [9 x i8*], align 8 // CHECK5-NEXT: [[DOTOFFLOAD_MAPPERS28:%.*]] = alloca [9 x i8*], align 8 -// CHECK5-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [9 x i64], align 8 // CHECK5-NEXT: [[_TMP29:%.*]] = alloca i32, align 4 // CHECK5-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]]) // CHECK5-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 @@ -7397,97 +7280,80 @@ // CHECK5-NEXT: [[TMP97:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 0 // CHECK5-NEXT: [[TMP98:%.*]] = bitcast i8** [[TMP97]] to i64* // CHECK5-NEXT: store i64 [[TMP90]], i64* [[TMP98]], align 8 -// CHECK5-NEXT: [[TMP99:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK5-NEXT: store i64 4, i64* [[TMP99]], align 8 -// CHECK5-NEXT: [[TMP100:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 0 -// CHECK5-NEXT: store i8* null, i8** [[TMP100]], align 8 -// CHECK5-NEXT: [[TMP101:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 1 -// CHECK5-NEXT: [[TMP102:%.*]] = bitcast i8** [[TMP101]] to [10 x float]** -// CHECK5-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP102]], align 8 -// CHECK5-NEXT: [[TMP103:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 1 -// CHECK5-NEXT: [[TMP104:%.*]] = bitcast i8** [[TMP103]] to [10 x float]** -// CHECK5-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP104]], align 8 -// CHECK5-NEXT: [[TMP105:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK5-NEXT: store i64 40, i64* [[TMP105]], align 8 -// CHECK5-NEXT: [[TMP106:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 1 -// CHECK5-NEXT: store i8* null, i8** [[TMP106]], align 8 -// CHECK5-NEXT: [[TMP107:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 2 +// CHECK5-NEXT: [[TMP99:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 0 +// CHECK5-NEXT: store i8* null, i8** [[TMP99]], align 8 +// CHECK5-NEXT: [[TMP100:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 1 +// CHECK5-NEXT: [[TMP101:%.*]] = bitcast i8** [[TMP100]] to [10 x float]** +// CHECK5-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP101]], align 8 +// CHECK5-NEXT: [[TMP102:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 1 +// CHECK5-NEXT: [[TMP103:%.*]] = bitcast i8** [[TMP102]] to [10 x float]** +// CHECK5-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP103]], align 8 +// 
CHECK5-NEXT: [[TMP104:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 1 +// CHECK5-NEXT: store i8* null, i8** [[TMP104]], align 8 +// CHECK5-NEXT: [[TMP105:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 2 +// CHECK5-NEXT: [[TMP106:%.*]] = bitcast i8** [[TMP105]] to i64* +// CHECK5-NEXT: store i64 [[TMP2]], i64* [[TMP106]], align 8 +// CHECK5-NEXT: [[TMP107:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 2 // CHECK5-NEXT: [[TMP108:%.*]] = bitcast i8** [[TMP107]] to i64* // CHECK5-NEXT: store i64 [[TMP2]], i64* [[TMP108]], align 8 -// CHECK5-NEXT: [[TMP109:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 2 -// CHECK5-NEXT: [[TMP110:%.*]] = bitcast i8** [[TMP109]] to i64* -// CHECK5-NEXT: store i64 [[TMP2]], i64* [[TMP110]], align 8 -// CHECK5-NEXT: [[TMP111:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK5-NEXT: store i64 8, i64* [[TMP111]], align 8 -// CHECK5-NEXT: [[TMP112:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 2 -// CHECK5-NEXT: store i8* null, i8** [[TMP112]], align 8 -// CHECK5-NEXT: [[TMP113:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 3 -// CHECK5-NEXT: [[TMP114:%.*]] = bitcast i8** [[TMP113]] to float** -// CHECK5-NEXT: store float* [[VLA]], float** [[TMP114]], align 8 -// CHECK5-NEXT: [[TMP115:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 3 -// CHECK5-NEXT: [[TMP116:%.*]] = bitcast i8** [[TMP115]] to float** -// CHECK5-NEXT: store float* [[VLA]], float** [[TMP116]], align 8 -// CHECK5-NEXT: [[TMP117:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK5-NEXT: store i64 [[TMP92]], i64* [[TMP117]], align 8 -// CHECK5-NEXT: [[TMP118:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 3 -// CHECK5-NEXT: store i8* null, i8** [[TMP118]], align 8 -// CHECK5-NEXT: [[TMP119:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 4 -// CHECK5-NEXT: [[TMP120:%.*]] = bitcast i8** [[TMP119]] to [5 x [10 x double]]** -// CHECK5-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP120]], align 8 -// CHECK5-NEXT: [[TMP121:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 4 -// CHECK5-NEXT: [[TMP122:%.*]] = bitcast i8** [[TMP121]] to [5 x [10 x double]]** -// CHECK5-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP122]], align 8 -// CHECK5-NEXT: [[TMP123:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK5-NEXT: store i64 400, i64* [[TMP123]], align 8 -// CHECK5-NEXT: [[TMP124:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 4 +// CHECK5-NEXT: [[TMP109:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 2 +// CHECK5-NEXT: store i8* null, i8** [[TMP109]], align 8 +// CHECK5-NEXT: [[TMP110:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 3 +// CHECK5-NEXT: [[TMP111:%.*]] = bitcast i8** [[TMP110]] to float** +// CHECK5-NEXT: store float* [[VLA]], float** [[TMP111]], align 8 +// CHECK5-NEXT: [[TMP112:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 3 +// CHECK5-NEXT: 
[[TMP113:%.*]] = bitcast i8** [[TMP112]] to float** +// CHECK5-NEXT: store float* [[VLA]], float** [[TMP113]], align 8 +// CHECK5-NEXT: store i64 [[TMP92]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_sizes.10, i32 0, i32 3), align 8 +// CHECK5-NEXT: [[TMP114:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 3 +// CHECK5-NEXT: store i8* null, i8** [[TMP114]], align 8 +// CHECK5-NEXT: [[TMP115:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 4 +// CHECK5-NEXT: [[TMP116:%.*]] = bitcast i8** [[TMP115]] to [5 x [10 x double]]** +// CHECK5-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP116]], align 8 +// CHECK5-NEXT: [[TMP117:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 4 +// CHECK5-NEXT: [[TMP118:%.*]] = bitcast i8** [[TMP117]] to [5 x [10 x double]]** +// CHECK5-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP118]], align 8 +// CHECK5-NEXT: [[TMP119:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 4 +// CHECK5-NEXT: store i8* null, i8** [[TMP119]], align 8 +// CHECK5-NEXT: [[TMP120:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 5 +// CHECK5-NEXT: [[TMP121:%.*]] = bitcast i8** [[TMP120]] to i64* +// CHECK5-NEXT: store i64 5, i64* [[TMP121]], align 8 +// CHECK5-NEXT: [[TMP122:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 5 +// CHECK5-NEXT: [[TMP123:%.*]] = bitcast i8** [[TMP122]] to i64* +// CHECK5-NEXT: store i64 5, i64* [[TMP123]], align 8 +// CHECK5-NEXT: [[TMP124:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 5 // CHECK5-NEXT: store i8* null, i8** [[TMP124]], align 8 -// CHECK5-NEXT: [[TMP125:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 5 +// CHECK5-NEXT: [[TMP125:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 6 // CHECK5-NEXT: [[TMP126:%.*]] = bitcast i8** [[TMP125]] to i64* -// CHECK5-NEXT: store i64 5, i64* [[TMP126]], align 8 -// CHECK5-NEXT: [[TMP127:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 5 +// CHECK5-NEXT: store i64 [[TMP5]], i64* [[TMP126]], align 8 +// CHECK5-NEXT: [[TMP127:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 6 // CHECK5-NEXT: [[TMP128:%.*]] = bitcast i8** [[TMP127]] to i64* -// CHECK5-NEXT: store i64 5, i64* [[TMP128]], align 8 -// CHECK5-NEXT: [[TMP129:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5 -// CHECK5-NEXT: store i64 8, i64* [[TMP129]], align 8 -// CHECK5-NEXT: [[TMP130:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 5 -// CHECK5-NEXT: store i8* null, i8** [[TMP130]], align 8 -// CHECK5-NEXT: [[TMP131:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 6 -// CHECK5-NEXT: [[TMP132:%.*]] = bitcast i8** [[TMP131]] to i64* -// CHECK5-NEXT: store i64 [[TMP5]], i64* [[TMP132]], align 8 -// CHECK5-NEXT: [[TMP133:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 6 -// CHECK5-NEXT: [[TMP134:%.*]] = bitcast i8** [[TMP133]] to i64* -// CHECK5-NEXT: store i64 [[TMP5]], i64* [[TMP134]], align 8 -// CHECK5-NEXT: [[TMP135:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, 
i32 6 -// CHECK5-NEXT: store i64 8, i64* [[TMP135]], align 8 -// CHECK5-NEXT: [[TMP136:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 6 -// CHECK5-NEXT: store i8* null, i8** [[TMP136]], align 8 -// CHECK5-NEXT: [[TMP137:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 7 -// CHECK5-NEXT: [[TMP138:%.*]] = bitcast i8** [[TMP137]] to double** -// CHECK5-NEXT: store double* [[VLA1]], double** [[TMP138]], align 8 -// CHECK5-NEXT: [[TMP139:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 7 -// CHECK5-NEXT: [[TMP140:%.*]] = bitcast i8** [[TMP139]] to double** -// CHECK5-NEXT: store double* [[VLA1]], double** [[TMP140]], align 8 -// CHECK5-NEXT: [[TMP141:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7 -// CHECK5-NEXT: store i64 [[TMP94]], i64* [[TMP141]], align 8 -// CHECK5-NEXT: [[TMP142:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 7 -// CHECK5-NEXT: store i8* null, i8** [[TMP142]], align 8 -// CHECK5-NEXT: [[TMP143:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 8 -// CHECK5-NEXT: [[TMP144:%.*]] = bitcast i8** [[TMP143]] to %struct.TT** -// CHECK5-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP144]], align 8 -// CHECK5-NEXT: [[TMP145:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 8 -// CHECK5-NEXT: [[TMP146:%.*]] = bitcast i8** [[TMP145]] to %struct.TT** -// CHECK5-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP146]], align 8 -// CHECK5-NEXT: [[TMP147:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 8 -// CHECK5-NEXT: store i64 16, i64* [[TMP147]], align 8 -// CHECK5-NEXT: [[TMP148:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 8 -// CHECK5-NEXT: store i8* null, i8** [[TMP148]], align 8 -// CHECK5-NEXT: [[TMP149:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 0 -// CHECK5-NEXT: [[TMP150:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 0 -// CHECK5-NEXT: [[TMP151:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 +// CHECK5-NEXT: store i64 [[TMP5]], i64* [[TMP128]], align 8 +// CHECK5-NEXT: [[TMP129:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 6 +// CHECK5-NEXT: store i8* null, i8** [[TMP129]], align 8 +// CHECK5-NEXT: [[TMP130:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 7 +// CHECK5-NEXT: [[TMP131:%.*]] = bitcast i8** [[TMP130]] to double** +// CHECK5-NEXT: store double* [[VLA1]], double** [[TMP131]], align 8 +// CHECK5-NEXT: [[TMP132:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 7 +// CHECK5-NEXT: [[TMP133:%.*]] = bitcast i8** [[TMP132]] to double** +// CHECK5-NEXT: store double* [[VLA1]], double** [[TMP133]], align 8 +// CHECK5-NEXT: store i64 [[TMP94]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_sizes.10, i32 0, i32 7), align 8 +// CHECK5-NEXT: [[TMP134:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 7 +// CHECK5-NEXT: store i8* null, i8** [[TMP134]], align 8 +// CHECK5-NEXT: [[TMP135:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 8 +// CHECK5-NEXT: [[TMP136:%.*]] = bitcast 
i8** [[TMP135]] to %struct.TT** +// CHECK5-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP136]], align 8 +// CHECK5-NEXT: [[TMP137:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 8 +// CHECK5-NEXT: [[TMP138:%.*]] = bitcast i8** [[TMP137]] to %struct.TT** +// CHECK5-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP138]], align 8 +// CHECK5-NEXT: [[TMP139:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 8 +// CHECK5-NEXT: store i8* null, i8** [[TMP139]], align 8 +// CHECK5-NEXT: [[TMP140:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 0 +// CHECK5-NEXT: [[TMP141:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 0 // CHECK5-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK5-NEXT: [[TMP152:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142.region_id, i32 9, i8** [[TMP149]], i8** [[TMP150]], i64* [[TMP151]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) -// CHECK5-NEXT: [[TMP153:%.*]] = icmp ne i32 [[TMP152]], 0 -// CHECK5-NEXT: br i1 [[TMP153]], label [[OMP_OFFLOAD_FAILED30:%.*]], label [[OMP_OFFLOAD_CONT31:%.*]] +// CHECK5-NEXT: [[TMP142:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142.region_id, i32 9, i8** [[TMP140]], i8** [[TMP141]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_sizes.10, i32 0, i32 0), i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_maptypes.11, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK5-NEXT: [[TMP143:%.*]] = icmp ne i32 [[TMP142]], 0 +// CHECK5-NEXT: br i1 [[TMP143]], label [[OMP_OFFLOAD_FAILED30:%.*]], label [[OMP_OFFLOAD_CONT31:%.*]] // CHECK5: omp_offload.failed30: // CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142(i64 [[TMP90]], [10 x float]* [[B]], i64 [[TMP2]], float* [[VLA]], [5 x [10 x double]]* [[C]], i64 5, i64 [[TMP5]], double* [[VLA1]], %struct.TT* [[D]]) #[[ATTR4]] // CHECK5-NEXT: br label [[OMP_OFFLOAD_CONT31]] @@ -7497,10 +7363,10 @@ // CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142(i64 [[TMP90]], [10 x float]* [[B]], i64 [[TMP2]], float* [[VLA]], [5 x [10 x double]]* [[C]], i64 5, i64 [[TMP5]], double* [[VLA1]], %struct.TT* [[D]]) #[[ATTR4]] // CHECK5-NEXT: br label [[OMP_IF_END33]] // CHECK5: omp_if.end33: -// CHECK5-NEXT: [[TMP154:%.*]] = load i32, i32* [[A]], align 4 -// CHECK5-NEXT: [[TMP155:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK5-NEXT: call void @llvm.stackrestore(i8* [[TMP155]]) -// CHECK5-NEXT: ret i32 [[TMP154]] +// CHECK5-NEXT: [[TMP144:%.*]] = load i32, i32* [[A]], align 4 +// CHECK5-NEXT: [[TMP145:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK5-NEXT: call void @llvm.stackrestore(i8* [[TMP145]]) +// CHECK5-NEXT: ret i32 [[TMP144]] // // // CHECK5-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l97 @@ -8204,7 +8070,6 @@ // CHECK5-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [6 x i8*], align 8 // CHECK5-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [6 x i8*], align 8 // CHECK5-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [6 x i8*], align 8 -// CHECK5-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [6 x i64], align 8 // CHECK5-NEXT: 
[[TMP:%.*]] = alloca i32, align 4 // CHECK5-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 // CHECK5-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 @@ -8246,67 +8111,55 @@ // CHECK5-NEXT: [[TMP15:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK5-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to double** // CHECK5-NEXT: store double* [[A]], double** [[TMP16]], align 8 -// CHECK5-NEXT: [[TMP17:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK5-NEXT: store i64 8, i64* [[TMP17]], align 8 -// CHECK5-NEXT: [[TMP18:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK5-NEXT: store i8* null, i8** [[TMP18]], align 8 -// CHECK5-NEXT: [[TMP19:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK5-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i64* -// CHECK5-NEXT: store i64 [[TMP7]], i64* [[TMP20]], align 8 -// CHECK5-NEXT: [[TMP21:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK5-NEXT: [[TMP22:%.*]] = bitcast i8** [[TMP21]] to i64* -// CHECK5-NEXT: store i64 [[TMP7]], i64* [[TMP22]], align 8 -// CHECK5-NEXT: [[TMP23:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK5-NEXT: store i64 4, i64* [[TMP23]], align 8 -// CHECK5-NEXT: [[TMP24:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK5-NEXT: store i8* null, i8** [[TMP24]], align 8 -// CHECK5-NEXT: [[TMP25:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK5-NEXT: [[TMP17:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK5-NEXT: store i8* null, i8** [[TMP17]], align 8 +// CHECK5-NEXT: [[TMP18:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK5-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i64* +// CHECK5-NEXT: store i64 [[TMP7]], i64* [[TMP19]], align 8 +// CHECK5-NEXT: [[TMP20:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK5-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i64* +// CHECK5-NEXT: store i64 [[TMP7]], i64* [[TMP21]], align 8 +// CHECK5-NEXT: [[TMP22:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK5-NEXT: store i8* null, i8** [[TMP22]], align 8 +// CHECK5-NEXT: [[TMP23:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK5-NEXT: [[TMP24:%.*]] = bitcast i8** [[TMP23]] to i64* +// CHECK5-NEXT: store i64 2, i64* [[TMP24]], align 8 +// CHECK5-NEXT: [[TMP25:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK5-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i64* // CHECK5-NEXT: store i64 2, i64* [[TMP26]], align 8 -// CHECK5-NEXT: [[TMP27:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK5-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i64* -// CHECK5-NEXT: store i64 2, i64* [[TMP28]], align 8 -// CHECK5-NEXT: [[TMP29:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK5-NEXT: store i64 8, i64* [[TMP29]], align 8 -// CHECK5-NEXT: [[TMP30:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK5-NEXT: store i8* null, i8** 
[[TMP30]], align 8 -// CHECK5-NEXT: [[TMP31:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK5-NEXT: [[TMP32:%.*]] = bitcast i8** [[TMP31]] to i64* -// CHECK5-NEXT: store i64 [[TMP2]], i64* [[TMP32]], align 8 -// CHECK5-NEXT: [[TMP33:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK5-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i64* -// CHECK5-NEXT: store i64 [[TMP2]], i64* [[TMP34]], align 8 -// CHECK5-NEXT: [[TMP35:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK5-NEXT: store i64 8, i64* [[TMP35]], align 8 -// CHECK5-NEXT: [[TMP36:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 -// CHECK5-NEXT: store i8* null, i8** [[TMP36]], align 8 -// CHECK5-NEXT: [[TMP37:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK5-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i16** -// CHECK5-NEXT: store i16* [[VLA]], i16** [[TMP38]], align 8 -// CHECK5-NEXT: [[TMP39:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK5-NEXT: [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i16** -// CHECK5-NEXT: store i16* [[VLA]], i16** [[TMP40]], align 8 -// CHECK5-NEXT: [[TMP41:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK5-NEXT: store i64 [[TMP12]], i64* [[TMP41]], align 8 -// CHECK5-NEXT: [[TMP42:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 +// CHECK5-NEXT: [[TMP27:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK5-NEXT: store i8* null, i8** [[TMP27]], align 8 +// CHECK5-NEXT: [[TMP28:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK5-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i64* +// CHECK5-NEXT: store i64 [[TMP2]], i64* [[TMP29]], align 8 +// CHECK5-NEXT: [[TMP30:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK5-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i64* +// CHECK5-NEXT: store i64 [[TMP2]], i64* [[TMP31]], align 8 +// CHECK5-NEXT: [[TMP32:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 +// CHECK5-NEXT: store i8* null, i8** [[TMP32]], align 8 +// CHECK5-NEXT: [[TMP33:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK5-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i16** +// CHECK5-NEXT: store i16* [[VLA]], i16** [[TMP34]], align 8 +// CHECK5-NEXT: [[TMP35:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK5-NEXT: [[TMP36:%.*]] = bitcast i8** [[TMP35]] to i16** +// CHECK5-NEXT: store i16* [[VLA]], i16** [[TMP36]], align 8 +// CHECK5-NEXT: store i64 [[TMP12]], i64* getelementptr inbounds ([6 x i64], [6 x i64]* @.offload_sizes.13, i32 0, i32 4), align 8 +// CHECK5-NEXT: [[TMP37:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 +// CHECK5-NEXT: store i8* null, i8** [[TMP37]], align 8 +// CHECK5-NEXT: [[TMP38:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 5 +// CHECK5-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i64* +// CHECK5-NEXT: store i64 [[TMP9]], i64* [[TMP39]], align 8 +// CHECK5-NEXT: [[TMP40:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* 
[[DOTOFFLOAD_PTRS]], i32 0, i32 5 +// CHECK5-NEXT: [[TMP41:%.*]] = bitcast i8** [[TMP40]] to i64* +// CHECK5-NEXT: store i64 [[TMP9]], i64* [[TMP41]], align 8 +// CHECK5-NEXT: [[TMP42:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 5 // CHECK5-NEXT: store i8* null, i8** [[TMP42]], align 8 -// CHECK5-NEXT: [[TMP43:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 5 -// CHECK5-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i64* -// CHECK5-NEXT: store i64 [[TMP9]], i64* [[TMP44]], align 8 -// CHECK5-NEXT: [[TMP45:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 5 -// CHECK5-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i64* -// CHECK5-NEXT: store i64 [[TMP9]], i64* [[TMP46]], align 8 -// CHECK5-NEXT: [[TMP47:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5 -// CHECK5-NEXT: store i64 1, i64* [[TMP47]], align 8 -// CHECK5-NEXT: [[TMP48:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 5 -// CHECK5-NEXT: store i8* null, i8** [[TMP48]], align 8 -// CHECK5-NEXT: [[TMP49:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK5-NEXT: [[TMP50:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK5-NEXT: [[TMP51:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 +// CHECK5-NEXT: [[TMP43:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK5-NEXT: [[TMP44:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK5-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK5-NEXT: [[TMP52:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l215.region_id, i32 6, i8** [[TMP49]], i8** [[TMP50]], i64* [[TMP51]], i64* getelementptr inbounds ([6 x i64], [6 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) -// CHECK5-NEXT: [[TMP53:%.*]] = icmp ne i32 [[TMP52]], 0 -// CHECK5-NEXT: br i1 [[TMP53]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK5-NEXT: [[TMP45:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l215.region_id, i32 6, i8** [[TMP43]], i8** [[TMP44]], i64* getelementptr inbounds ([6 x i64], [6 x i64]* @.offload_sizes.13, i32 0, i32 0), i64* getelementptr inbounds ([6 x i64], [6 x i64]* @.offload_maptypes.14, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK5-NEXT: [[TMP46:%.*]] = icmp ne i32 [[TMP45]], 0 +// CHECK5-NEXT: br i1 [[TMP46]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK5: omp_offload.failed: // CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l215(%struct.S1* [[THIS1]], i64 [[TMP7]], i64 2, i64 [[TMP2]], i16* [[VLA]], i64 [[TMP9]]) #[[ATTR4]] // CHECK5-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -8316,15 +8169,15 @@ // CHECK5-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l215(%struct.S1* [[THIS1]], i64 [[TMP7]], i64 2, i64 [[TMP2]], i16* [[VLA]], i64 [[TMP9]]) #[[ATTR4]] // CHECK5-NEXT: br label [[OMP_IF_END]] // CHECK5: omp_if.end: -// CHECK5-NEXT: [[TMP54:%.*]] = mul nsw i64 1, [[TMP2]] -// CHECK5-NEXT: [[ARRAYIDX:%.*]] = 
getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP54]] +// CHECK5-NEXT: [[TMP47:%.*]] = mul nsw i64 1, [[TMP2]] +// CHECK5-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP47]] // CHECK5-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1 -// CHECK5-NEXT: [[TMP55:%.*]] = load i16, i16* [[ARRAYIDX5]], align 2 -// CHECK5-NEXT: [[CONV6:%.*]] = sext i16 [[TMP55]] to i32 -// CHECK5-NEXT: [[TMP56:%.*]] = load i32, i32* [[B]], align 4 -// CHECK5-NEXT: [[ADD7:%.*]] = add nsw i32 [[CONV6]], [[TMP56]] -// CHECK5-NEXT: [[TMP57:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK5-NEXT: call void @llvm.stackrestore(i8* [[TMP57]]) +// CHECK5-NEXT: [[TMP48:%.*]] = load i16, i16* [[ARRAYIDX5]], align 2 +// CHECK5-NEXT: [[CONV6:%.*]] = sext i16 [[TMP48]] to i32 +// CHECK5-NEXT: [[TMP49:%.*]] = load i32, i32* [[B]], align 4 +// CHECK5-NEXT: [[ADD7:%.*]] = add nsw i32 [[CONV6]], [[TMP49]] +// CHECK5-NEXT: [[TMP50:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK5-NEXT: call void @llvm.stackrestore(i8* [[TMP50]]) // CHECK5-NEXT: ret i32 [[ADD7]] // // @@ -8429,7 +8282,7 @@ // CHECK5-NEXT: [[ADD8:%.*]] = add i32 [[TMP40]], 1 // CHECK5-NEXT: [[TMP41:%.*]] = zext i32 [[ADD8]] to i64 // CHECK5-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP41]]) -// CHECK5-NEXT: [[TMP42:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l197.region_id, i32 5, i8** [[TMP34]], i8** [[TMP35]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.14, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.15, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK5-NEXT: [[TMP42:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l197.region_id, i32 5, i8** [[TMP34]], i8** [[TMP35]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.16, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) // CHECK5-NEXT: [[TMP43:%.*]] = icmp ne i32 [[TMP42]], 0 // CHECK5-NEXT: br i1 [[TMP43]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK5: omp_offload.failed: @@ -8500,7 +8353,7 @@ // CHECK5-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK5-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK5-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK5-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l180.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.17, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.18, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK5-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l180.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.19, i32 0, i32 0), i64* getelementptr inbounds ([3 x 
i64], [3 x i64]* @.offload_maptypes.20, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) // CHECK5-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0 // CHECK5-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK5: omp_offload.failed: @@ -8549,11 +8402,11 @@ // CHECK5-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8 // CHECK5-NEXT: store i8 [[FROMBOOL]], i8* [[CONV5]], align 1 // CHECK5-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK5-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*, i64)* @.omp_outlined..11 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], i16* [[TMP3]], i64 [[TMP7]]) +// CHECK5-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*, i64)* @.omp_outlined..12 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], i16* [[TMP3]], i64 [[TMP7]]) // CHECK5-NEXT: ret void // // -// CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..12 // CHECK5-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i64 noundef [[B:%.*]], i64 noundef [[VLA:%.*]], i64 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] { // CHECK5-NEXT: entry: // CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -8732,11 +8585,11 @@ // CHECK5-NEXT: [[CONV7:%.*]] = bitcast i64* [[AAA_CASTED]] to i8* // CHECK5-NEXT: store i8 [[TMP7]], i8* [[CONV7]], align 1 // CHECK5-NEXT: [[TMP8:%.*]] = load i64, i64* [[AAA_CASTED]], align 8 -// CHECK5-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, [10 x i32]*)* @.omp_outlined..13 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], i64 [[TMP8]], [10 x i32]* [[TMP0]]) +// CHECK5-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, [10 x i32]*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], i64 [[TMP8]], [10 x i32]* [[TMP0]]) // CHECK5-NEXT: ret void // // -// CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..13 +// CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..15 // CHECK5-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[N:%.*]], i64 noundef [[AA:%.*]], i64 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] { // CHECK5-NEXT: entry: // CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -8898,11 +8751,11 @@ // CHECK5-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16* // CHECK5-NEXT: store i16 [[TMP3]], i16* [[CONV3]], align 2 // CHECK5-NEXT: [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8 -// CHECK5-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]]) +// CHECK5-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]]) // CHECK5-NEXT: ret void // // -// CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..16 +// CHECK5-LABEL: define {{[^@]+}}@.omp_outlined..18 // CHECK5-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] { // CHECK5-NEXT: entry: // CHECK5-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -9034,7 +8887,6 @@ // CHECK6-NEXT: [[DOTOFFLOAD_BASEPTRS26:%.*]] = alloca [9 x i8*], align 8 // CHECK6-NEXT: [[DOTOFFLOAD_PTRS27:%.*]] = alloca [9 x i8*], align 8 // CHECK6-NEXT: [[DOTOFFLOAD_MAPPERS28:%.*]] = alloca [9 x i8*], align 8 -// CHECK6-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [9 x i64], align 8 // CHECK6-NEXT: [[_TMP29:%.*]] = alloca i32, align 4 // CHECK6-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]]) // CHECK6-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 @@ -9213,97 +9065,80 @@ // CHECK6-NEXT: [[TMP97:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 0 // CHECK6-NEXT: [[TMP98:%.*]] = bitcast i8** [[TMP97]] to i64* // CHECK6-NEXT: store i64 [[TMP90]], i64* [[TMP98]], align 8 -// CHECK6-NEXT: [[TMP99:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK6-NEXT: store i64 4, i64* [[TMP99]], align 8 -// CHECK6-NEXT: [[TMP100:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 0 -// CHECK6-NEXT: store i8* null, i8** [[TMP100]], align 8 -// CHECK6-NEXT: [[TMP101:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 1 -// CHECK6-NEXT: [[TMP102:%.*]] = bitcast i8** [[TMP101]] to [10 x float]** -// CHECK6-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP102]], align 8 -// CHECK6-NEXT: [[TMP103:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 1 -// CHECK6-NEXT: [[TMP104:%.*]] = bitcast i8** [[TMP103]] to [10 x float]** -// CHECK6-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP104]], align 8 -// CHECK6-NEXT: [[TMP105:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK6-NEXT: store i64 40, i64* [[TMP105]], align 8 -// CHECK6-NEXT: [[TMP106:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 1 -// CHECK6-NEXT: store i8* null, i8** [[TMP106]], align 8 -// CHECK6-NEXT: [[TMP107:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 2 +// CHECK6-NEXT: [[TMP99:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 0 +// CHECK6-NEXT: store i8* null, i8** [[TMP99]], align 8 +// CHECK6-NEXT: [[TMP100:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 1 +// CHECK6-NEXT: [[TMP101:%.*]] = bitcast i8** [[TMP100]] to [10 x float]** +// CHECK6-NEXT: store [10 x float]* [[B]], [10 x 
float]** [[TMP101]], align 8 +// CHECK6-NEXT: [[TMP102:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 1 +// CHECK6-NEXT: [[TMP103:%.*]] = bitcast i8** [[TMP102]] to [10 x float]** +// CHECK6-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP103]], align 8 +// CHECK6-NEXT: [[TMP104:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 1 +// CHECK6-NEXT: store i8* null, i8** [[TMP104]], align 8 +// CHECK6-NEXT: [[TMP105:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 2 +// CHECK6-NEXT: [[TMP106:%.*]] = bitcast i8** [[TMP105]] to i64* +// CHECK6-NEXT: store i64 [[TMP2]], i64* [[TMP106]], align 8 +// CHECK6-NEXT: [[TMP107:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 2 // CHECK6-NEXT: [[TMP108:%.*]] = bitcast i8** [[TMP107]] to i64* // CHECK6-NEXT: store i64 [[TMP2]], i64* [[TMP108]], align 8 -// CHECK6-NEXT: [[TMP109:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 2 -// CHECK6-NEXT: [[TMP110:%.*]] = bitcast i8** [[TMP109]] to i64* -// CHECK6-NEXT: store i64 [[TMP2]], i64* [[TMP110]], align 8 -// CHECK6-NEXT: [[TMP111:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK6-NEXT: store i64 8, i64* [[TMP111]], align 8 -// CHECK6-NEXT: [[TMP112:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 2 -// CHECK6-NEXT: store i8* null, i8** [[TMP112]], align 8 -// CHECK6-NEXT: [[TMP113:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 3 -// CHECK6-NEXT: [[TMP114:%.*]] = bitcast i8** [[TMP113]] to float** -// CHECK6-NEXT: store float* [[VLA]], float** [[TMP114]], align 8 -// CHECK6-NEXT: [[TMP115:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 3 -// CHECK6-NEXT: [[TMP116:%.*]] = bitcast i8** [[TMP115]] to float** -// CHECK6-NEXT: store float* [[VLA]], float** [[TMP116]], align 8 -// CHECK6-NEXT: [[TMP117:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK6-NEXT: store i64 [[TMP92]], i64* [[TMP117]], align 8 -// CHECK6-NEXT: [[TMP118:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 3 -// CHECK6-NEXT: store i8* null, i8** [[TMP118]], align 8 -// CHECK6-NEXT: [[TMP119:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 4 -// CHECK6-NEXT: [[TMP120:%.*]] = bitcast i8** [[TMP119]] to [5 x [10 x double]]** -// CHECK6-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP120]], align 8 -// CHECK6-NEXT: [[TMP121:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 4 -// CHECK6-NEXT: [[TMP122:%.*]] = bitcast i8** [[TMP121]] to [5 x [10 x double]]** -// CHECK6-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP122]], align 8 -// CHECK6-NEXT: [[TMP123:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK6-NEXT: store i64 400, i64* [[TMP123]], align 8 -// CHECK6-NEXT: [[TMP124:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 4 +// CHECK6-NEXT: [[TMP109:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 2 +// CHECK6-NEXT: store i8* null, i8** [[TMP109]], align 8 +// CHECK6-NEXT: [[TMP110:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* 
[[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 3 +// CHECK6-NEXT: [[TMP111:%.*]] = bitcast i8** [[TMP110]] to float** +// CHECK6-NEXT: store float* [[VLA]], float** [[TMP111]], align 8 +// CHECK6-NEXT: [[TMP112:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 3 +// CHECK6-NEXT: [[TMP113:%.*]] = bitcast i8** [[TMP112]] to float** +// CHECK6-NEXT: store float* [[VLA]], float** [[TMP113]], align 8 +// CHECK6-NEXT: store i64 [[TMP92]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_sizes.10, i32 0, i32 3), align 8 +// CHECK6-NEXT: [[TMP114:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 3 +// CHECK6-NEXT: store i8* null, i8** [[TMP114]], align 8 +// CHECK6-NEXT: [[TMP115:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 4 +// CHECK6-NEXT: [[TMP116:%.*]] = bitcast i8** [[TMP115]] to [5 x [10 x double]]** +// CHECK6-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP116]], align 8 +// CHECK6-NEXT: [[TMP117:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 4 +// CHECK6-NEXT: [[TMP118:%.*]] = bitcast i8** [[TMP117]] to [5 x [10 x double]]** +// CHECK6-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP118]], align 8 +// CHECK6-NEXT: [[TMP119:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 4 +// CHECK6-NEXT: store i8* null, i8** [[TMP119]], align 8 +// CHECK6-NEXT: [[TMP120:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 5 +// CHECK6-NEXT: [[TMP121:%.*]] = bitcast i8** [[TMP120]] to i64* +// CHECK6-NEXT: store i64 5, i64* [[TMP121]], align 8 +// CHECK6-NEXT: [[TMP122:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 5 +// CHECK6-NEXT: [[TMP123:%.*]] = bitcast i8** [[TMP122]] to i64* +// CHECK6-NEXT: store i64 5, i64* [[TMP123]], align 8 +// CHECK6-NEXT: [[TMP124:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 5 // CHECK6-NEXT: store i8* null, i8** [[TMP124]], align 8 -// CHECK6-NEXT: [[TMP125:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 5 +// CHECK6-NEXT: [[TMP125:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 6 // CHECK6-NEXT: [[TMP126:%.*]] = bitcast i8** [[TMP125]] to i64* -// CHECK6-NEXT: store i64 5, i64* [[TMP126]], align 8 -// CHECK6-NEXT: [[TMP127:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 5 +// CHECK6-NEXT: store i64 [[TMP5]], i64* [[TMP126]], align 8 +// CHECK6-NEXT: [[TMP127:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 6 // CHECK6-NEXT: [[TMP128:%.*]] = bitcast i8** [[TMP127]] to i64* -// CHECK6-NEXT: store i64 5, i64* [[TMP128]], align 8 -// CHECK6-NEXT: [[TMP129:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5 -// CHECK6-NEXT: store i64 8, i64* [[TMP129]], align 8 -// CHECK6-NEXT: [[TMP130:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 5 -// CHECK6-NEXT: store i8* null, i8** [[TMP130]], align 8 -// CHECK6-NEXT: [[TMP131:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 6 -// CHECK6-NEXT: [[TMP132:%.*]] = bitcast i8** [[TMP131]] to i64* -// CHECK6-NEXT: store i64 [[TMP5]], i64* [[TMP132]], align 8 -// CHECK6-NEXT: [[TMP133:%.*]] = 
getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 6 -// CHECK6-NEXT: [[TMP134:%.*]] = bitcast i8** [[TMP133]] to i64* -// CHECK6-NEXT: store i64 [[TMP5]], i64* [[TMP134]], align 8 -// CHECK6-NEXT: [[TMP135:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6 -// CHECK6-NEXT: store i64 8, i64* [[TMP135]], align 8 -// CHECK6-NEXT: [[TMP136:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 6 -// CHECK6-NEXT: store i8* null, i8** [[TMP136]], align 8 -// CHECK6-NEXT: [[TMP137:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 7 -// CHECK6-NEXT: [[TMP138:%.*]] = bitcast i8** [[TMP137]] to double** -// CHECK6-NEXT: store double* [[VLA1]], double** [[TMP138]], align 8 -// CHECK6-NEXT: [[TMP139:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 7 -// CHECK6-NEXT: [[TMP140:%.*]] = bitcast i8** [[TMP139]] to double** -// CHECK6-NEXT: store double* [[VLA1]], double** [[TMP140]], align 8 -// CHECK6-NEXT: [[TMP141:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7 -// CHECK6-NEXT: store i64 [[TMP94]], i64* [[TMP141]], align 8 -// CHECK6-NEXT: [[TMP142:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 7 -// CHECK6-NEXT: store i8* null, i8** [[TMP142]], align 8 -// CHECK6-NEXT: [[TMP143:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 8 -// CHECK6-NEXT: [[TMP144:%.*]] = bitcast i8** [[TMP143]] to %struct.TT** -// CHECK6-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP144]], align 8 -// CHECK6-NEXT: [[TMP145:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 8 -// CHECK6-NEXT: [[TMP146:%.*]] = bitcast i8** [[TMP145]] to %struct.TT** -// CHECK6-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP146]], align 8 -// CHECK6-NEXT: [[TMP147:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 8 -// CHECK6-NEXT: store i64 16, i64* [[TMP147]], align 8 -// CHECK6-NEXT: [[TMP148:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 8 -// CHECK6-NEXT: store i8* null, i8** [[TMP148]], align 8 -// CHECK6-NEXT: [[TMP149:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 0 -// CHECK6-NEXT: [[TMP150:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 0 -// CHECK6-NEXT: [[TMP151:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 +// CHECK6-NEXT: store i64 [[TMP5]], i64* [[TMP128]], align 8 +// CHECK6-NEXT: [[TMP129:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 6 +// CHECK6-NEXT: store i8* null, i8** [[TMP129]], align 8 +// CHECK6-NEXT: [[TMP130:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 7 +// CHECK6-NEXT: [[TMP131:%.*]] = bitcast i8** [[TMP130]] to double** +// CHECK6-NEXT: store double* [[VLA1]], double** [[TMP131]], align 8 +// CHECK6-NEXT: [[TMP132:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 7 +// CHECK6-NEXT: [[TMP133:%.*]] = bitcast i8** [[TMP132]] to double** +// CHECK6-NEXT: store double* [[VLA1]], double** [[TMP133]], align 8 +// CHECK6-NEXT: store i64 [[TMP94]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_sizes.10, i32 0, i32 7), align 8 +// CHECK6-NEXT: 
[[TMP134:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 7 +// CHECK6-NEXT: store i8* null, i8** [[TMP134]], align 8 +// CHECK6-NEXT: [[TMP135:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 8 +// CHECK6-NEXT: [[TMP136:%.*]] = bitcast i8** [[TMP135]] to %struct.TT** +// CHECK6-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP136]], align 8 +// CHECK6-NEXT: [[TMP137:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 8 +// CHECK6-NEXT: [[TMP138:%.*]] = bitcast i8** [[TMP137]] to %struct.TT** +// CHECK6-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP138]], align 8 +// CHECK6-NEXT: [[TMP139:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS28]], i64 0, i64 8 +// CHECK6-NEXT: store i8* null, i8** [[TMP139]], align 8 +// CHECK6-NEXT: [[TMP140:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS26]], i32 0, i32 0 +// CHECK6-NEXT: [[TMP141:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS27]], i32 0, i32 0 // CHECK6-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK6-NEXT: [[TMP152:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142.region_id, i32 9, i8** [[TMP149]], i8** [[TMP150]], i64* [[TMP151]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) -// CHECK6-NEXT: [[TMP153:%.*]] = icmp ne i32 [[TMP152]], 0 -// CHECK6-NEXT: br i1 [[TMP153]], label [[OMP_OFFLOAD_FAILED30:%.*]], label [[OMP_OFFLOAD_CONT31:%.*]] +// CHECK6-NEXT: [[TMP142:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142.region_id, i32 9, i8** [[TMP140]], i8** [[TMP141]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_sizes.10, i32 0, i32 0), i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_maptypes.11, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK6-NEXT: [[TMP143:%.*]] = icmp ne i32 [[TMP142]], 0 +// CHECK6-NEXT: br i1 [[TMP143]], label [[OMP_OFFLOAD_FAILED30:%.*]], label [[OMP_OFFLOAD_CONT31:%.*]] // CHECK6: omp_offload.failed30: // CHECK6-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142(i64 [[TMP90]], [10 x float]* [[B]], i64 [[TMP2]], float* [[VLA]], [5 x [10 x double]]* [[C]], i64 5, i64 [[TMP5]], double* [[VLA1]], %struct.TT* [[D]]) #[[ATTR4]] // CHECK6-NEXT: br label [[OMP_OFFLOAD_CONT31]] @@ -9313,10 +9148,10 @@ // CHECK6-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142(i64 [[TMP90]], [10 x float]* [[B]], i64 [[TMP2]], float* [[VLA]], [5 x [10 x double]]* [[C]], i64 5, i64 [[TMP5]], double* [[VLA1]], %struct.TT* [[D]]) #[[ATTR4]] // CHECK6-NEXT: br label [[OMP_IF_END33]] // CHECK6: omp_if.end33: -// CHECK6-NEXT: [[TMP154:%.*]] = load i32, i32* [[A]], align 4 -// CHECK6-NEXT: [[TMP155:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK6-NEXT: call void @llvm.stackrestore(i8* [[TMP155]]) -// CHECK6-NEXT: ret i32 [[TMP154]] +// CHECK6-NEXT: [[TMP144:%.*]] = load i32, i32* [[A]], align 4 +// CHECK6-NEXT: [[TMP145:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK6-NEXT: call void @llvm.stackrestore(i8* [[TMP145]]) +// CHECK6-NEXT: ret i32 [[TMP144]] // // // CHECK6-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l97 
@@ -10020,7 +9855,6 @@ // CHECK6-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [6 x i8*], align 8 // CHECK6-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [6 x i8*], align 8 // CHECK6-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [6 x i8*], align 8 -// CHECK6-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [6 x i64], align 8 // CHECK6-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK6-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 8 // CHECK6-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 @@ -10062,67 +9896,55 @@ // CHECK6-NEXT: [[TMP15:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK6-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to double** // CHECK6-NEXT: store double* [[A]], double** [[TMP16]], align 8 -// CHECK6-NEXT: [[TMP17:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK6-NEXT: store i64 8, i64* [[TMP17]], align 8 -// CHECK6-NEXT: [[TMP18:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK6-NEXT: store i8* null, i8** [[TMP18]], align 8 -// CHECK6-NEXT: [[TMP19:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK6-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i64* -// CHECK6-NEXT: store i64 [[TMP7]], i64* [[TMP20]], align 8 -// CHECK6-NEXT: [[TMP21:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK6-NEXT: [[TMP22:%.*]] = bitcast i8** [[TMP21]] to i64* -// CHECK6-NEXT: store i64 [[TMP7]], i64* [[TMP22]], align 8 -// CHECK6-NEXT: [[TMP23:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK6-NEXT: store i64 4, i64* [[TMP23]], align 8 -// CHECK6-NEXT: [[TMP24:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK6-NEXT: store i8* null, i8** [[TMP24]], align 8 -// CHECK6-NEXT: [[TMP25:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK6-NEXT: [[TMP17:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK6-NEXT: store i8* null, i8** [[TMP17]], align 8 +// CHECK6-NEXT: [[TMP18:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK6-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i64* +// CHECK6-NEXT: store i64 [[TMP7]], i64* [[TMP19]], align 8 +// CHECK6-NEXT: [[TMP20:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK6-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i64* +// CHECK6-NEXT: store i64 [[TMP7]], i64* [[TMP21]], align 8 +// CHECK6-NEXT: [[TMP22:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK6-NEXT: store i8* null, i8** [[TMP22]], align 8 +// CHECK6-NEXT: [[TMP23:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK6-NEXT: [[TMP24:%.*]] = bitcast i8** [[TMP23]] to i64* +// CHECK6-NEXT: store i64 2, i64* [[TMP24]], align 8 +// CHECK6-NEXT: [[TMP25:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK6-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i64* // CHECK6-NEXT: store i64 2, i64* [[TMP26]], align 8 -// CHECK6-NEXT: [[TMP27:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK6-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i64* -// CHECK6-NEXT: store i64 2, i64* [[TMP28]], align 8 -// 
CHECK6-NEXT: [[TMP29:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK6-NEXT: store i64 8, i64* [[TMP29]], align 8 -// CHECK6-NEXT: [[TMP30:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK6-NEXT: store i8* null, i8** [[TMP30]], align 8 -// CHECK6-NEXT: [[TMP31:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK6-NEXT: [[TMP32:%.*]] = bitcast i8** [[TMP31]] to i64* -// CHECK6-NEXT: store i64 [[TMP2]], i64* [[TMP32]], align 8 -// CHECK6-NEXT: [[TMP33:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK6-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i64* -// CHECK6-NEXT: store i64 [[TMP2]], i64* [[TMP34]], align 8 -// CHECK6-NEXT: [[TMP35:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK6-NEXT: store i64 8, i64* [[TMP35]], align 8 -// CHECK6-NEXT: [[TMP36:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 -// CHECK6-NEXT: store i8* null, i8** [[TMP36]], align 8 -// CHECK6-NEXT: [[TMP37:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK6-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i16** -// CHECK6-NEXT: store i16* [[VLA]], i16** [[TMP38]], align 8 -// CHECK6-NEXT: [[TMP39:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK6-NEXT: [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i16** -// CHECK6-NEXT: store i16* [[VLA]], i16** [[TMP40]], align 8 -// CHECK6-NEXT: [[TMP41:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK6-NEXT: store i64 [[TMP12]], i64* [[TMP41]], align 8 -// CHECK6-NEXT: [[TMP42:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 +// CHECK6-NEXT: [[TMP27:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK6-NEXT: store i8* null, i8** [[TMP27]], align 8 +// CHECK6-NEXT: [[TMP28:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK6-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i64* +// CHECK6-NEXT: store i64 [[TMP2]], i64* [[TMP29]], align 8 +// CHECK6-NEXT: [[TMP30:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK6-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i64* +// CHECK6-NEXT: store i64 [[TMP2]], i64* [[TMP31]], align 8 +// CHECK6-NEXT: [[TMP32:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 +// CHECK6-NEXT: store i8* null, i8** [[TMP32]], align 8 +// CHECK6-NEXT: [[TMP33:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK6-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i16** +// CHECK6-NEXT: store i16* [[VLA]], i16** [[TMP34]], align 8 +// CHECK6-NEXT: [[TMP35:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK6-NEXT: [[TMP36:%.*]] = bitcast i8** [[TMP35]] to i16** +// CHECK6-NEXT: store i16* [[VLA]], i16** [[TMP36]], align 8 +// CHECK6-NEXT: store i64 [[TMP12]], i64* getelementptr inbounds ([6 x i64], [6 x i64]* @.offload_sizes.13, i32 0, i32 4), align 8 +// CHECK6-NEXT: [[TMP37:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 +// CHECK6-NEXT: store i8* null, i8** [[TMP37]], align 8 +// CHECK6-NEXT: 
[[TMP38:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 5 +// CHECK6-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i64* +// CHECK6-NEXT: store i64 [[TMP9]], i64* [[TMP39]], align 8 +// CHECK6-NEXT: [[TMP40:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 5 +// CHECK6-NEXT: [[TMP41:%.*]] = bitcast i8** [[TMP40]] to i64* +// CHECK6-NEXT: store i64 [[TMP9]], i64* [[TMP41]], align 8 +// CHECK6-NEXT: [[TMP42:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 5 // CHECK6-NEXT: store i8* null, i8** [[TMP42]], align 8 -// CHECK6-NEXT: [[TMP43:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 5 -// CHECK6-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i64* -// CHECK6-NEXT: store i64 [[TMP9]], i64* [[TMP44]], align 8 -// CHECK6-NEXT: [[TMP45:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 5 -// CHECK6-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i64* -// CHECK6-NEXT: store i64 [[TMP9]], i64* [[TMP46]], align 8 -// CHECK6-NEXT: [[TMP47:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5 -// CHECK6-NEXT: store i64 1, i64* [[TMP47]], align 8 -// CHECK6-NEXT: [[TMP48:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 5 -// CHECK6-NEXT: store i8* null, i8** [[TMP48]], align 8 -// CHECK6-NEXT: [[TMP49:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK6-NEXT: [[TMP50:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK6-NEXT: [[TMP51:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 +// CHECK6-NEXT: [[TMP43:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK6-NEXT: [[TMP44:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK6-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK6-NEXT: [[TMP52:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l215.region_id, i32 6, i8** [[TMP49]], i8** [[TMP50]], i64* [[TMP51]], i64* getelementptr inbounds ([6 x i64], [6 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) -// CHECK6-NEXT: [[TMP53:%.*]] = icmp ne i32 [[TMP52]], 0 -// CHECK6-NEXT: br i1 [[TMP53]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK6-NEXT: [[TMP45:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l215.region_id, i32 6, i8** [[TMP43]], i8** [[TMP44]], i64* getelementptr inbounds ([6 x i64], [6 x i64]* @.offload_sizes.13, i32 0, i32 0), i64* getelementptr inbounds ([6 x i64], [6 x i64]* @.offload_maptypes.14, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK6-NEXT: [[TMP46:%.*]] = icmp ne i32 [[TMP45]], 0 +// CHECK6-NEXT: br i1 [[TMP46]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK6: omp_offload.failed: // CHECK6-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l215(%struct.S1* [[THIS1]], i64 [[TMP7]], i64 2, i64 [[TMP2]], i16* [[VLA]], i64 [[TMP9]]) #[[ATTR4]] // CHECK6-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -10132,15 +9954,15 @@ // CHECK6-NEXT: call void 
@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l215(%struct.S1* [[THIS1]], i64 [[TMP7]], i64 2, i64 [[TMP2]], i16* [[VLA]], i64 [[TMP9]]) #[[ATTR4]] // CHECK6-NEXT: br label [[OMP_IF_END]] // CHECK6: omp_if.end: -// CHECK6-NEXT: [[TMP54:%.*]] = mul nsw i64 1, [[TMP2]] -// CHECK6-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP54]] +// CHECK6-NEXT: [[TMP47:%.*]] = mul nsw i64 1, [[TMP2]] +// CHECK6-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i64 [[TMP47]] // CHECK6-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i64 1 -// CHECK6-NEXT: [[TMP55:%.*]] = load i16, i16* [[ARRAYIDX5]], align 2 -// CHECK6-NEXT: [[CONV6:%.*]] = sext i16 [[TMP55]] to i32 -// CHECK6-NEXT: [[TMP56:%.*]] = load i32, i32* [[B]], align 4 -// CHECK6-NEXT: [[ADD7:%.*]] = add nsw i32 [[CONV6]], [[TMP56]] -// CHECK6-NEXT: [[TMP57:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK6-NEXT: call void @llvm.stackrestore(i8* [[TMP57]]) +// CHECK6-NEXT: [[TMP48:%.*]] = load i16, i16* [[ARRAYIDX5]], align 2 +// CHECK6-NEXT: [[CONV6:%.*]] = sext i16 [[TMP48]] to i32 +// CHECK6-NEXT: [[TMP49:%.*]] = load i32, i32* [[B]], align 4 +// CHECK6-NEXT: [[ADD7:%.*]] = add nsw i32 [[CONV6]], [[TMP49]] +// CHECK6-NEXT: [[TMP50:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK6-NEXT: call void @llvm.stackrestore(i8* [[TMP50]]) // CHECK6-NEXT: ret i32 [[ADD7]] // // @@ -10245,7 +10067,7 @@ // CHECK6-NEXT: [[ADD8:%.*]] = add i32 [[TMP40]], 1 // CHECK6-NEXT: [[TMP41:%.*]] = zext i32 [[ADD8]] to i64 // CHECK6-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP41]]) -// CHECK6-NEXT: [[TMP42:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l197.region_id, i32 5, i8** [[TMP34]], i8** [[TMP35]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.14, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.15, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK6-NEXT: [[TMP42:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l197.region_id, i32 5, i8** [[TMP34]], i8** [[TMP35]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.16, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) // CHECK6-NEXT: [[TMP43:%.*]] = icmp ne i32 [[TMP42]], 0 // CHECK6-NEXT: br i1 [[TMP43]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK6: omp_offload.failed: @@ -10316,7 +10138,7 @@ // CHECK6-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK6-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK6-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK6-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l180.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.17, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.18, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK6-NEXT: [[TMP22:%.*]] 
= call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l180.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.19, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.20, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) // CHECK6-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0 // CHECK6-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK6: omp_offload.failed: @@ -10365,11 +10187,11 @@ // CHECK6-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8 // CHECK6-NEXT: store i8 [[FROMBOOL]], i8* [[CONV5]], align 1 // CHECK6-NEXT: [[TMP7:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK6-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*, i64)* @.omp_outlined..11 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], i16* [[TMP3]], i64 [[TMP7]]) +// CHECK6-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i64, i64, i64, i16*, i64)* @.omp_outlined..12 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i64 [[TMP5]], i64 [[TMP1]], i64 [[TMP2]], i16* [[TMP3]], i64 [[TMP7]]) // CHECK6-NEXT: ret void // // -// CHECK6-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK6-LABEL: define {{[^@]+}}@.omp_outlined..12 // CHECK6-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i64 noundef [[B:%.*]], i64 noundef [[VLA:%.*]], i64 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] { // CHECK6-NEXT: entry: // CHECK6-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -10548,11 +10370,11 @@ // CHECK6-NEXT: [[CONV7:%.*]] = bitcast i64* [[AAA_CASTED]] to i8* // CHECK6-NEXT: store i8 [[TMP7]], i8* [[CONV7]], align 1 // CHECK6-NEXT: [[TMP8:%.*]] = load i64, i64* [[AAA_CASTED]], align 8 -// CHECK6-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, [10 x i32]*)* @.omp_outlined..13 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], i64 [[TMP8]], [10 x i32]* [[TMP0]]) +// CHECK6-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i64, i64, [10 x i32]*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], i64 [[TMP6]], i64 [[TMP8]], [10 x i32]* [[TMP0]]) // CHECK6-NEXT: ret void // // -// CHECK6-LABEL: define {{[^@]+}}@.omp_outlined..13 +// CHECK6-LABEL: define {{[^@]+}}@.omp_outlined..15 // CHECK6-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[N:%.*]], i64 noundef [[AA:%.*]], i64 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] { // CHECK6-NEXT: entry: // CHECK6-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -10714,11 +10536,11 @@ // CHECK6-NEXT: [[CONV3:%.*]] = bitcast i64* [[AA_CASTED]] to i16* // CHECK6-NEXT: store i16 [[TMP3]], i16* [[CONV3]], align 2 // CHECK6-NEXT: [[TMP4:%.*]] = load i64, i64* [[AA_CASTED]], align 8 -// CHECK6-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]]) +// CHECK6-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), i64 [[TMP2]], i64 [[TMP4]], [10 x i32]* [[TMP0]]) // CHECK6-NEXT: ret void // // -// CHECK6-LABEL: define {{[^@]+}}@.omp_outlined..16 +// CHECK6-LABEL: define {{[^@]+}}@.omp_outlined..18 // CHECK6-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[A:%.*]], i64 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] { // CHECK6-NEXT: entry: // CHECK6-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -10850,7 +10672,6 @@ // CHECK7-NEXT: [[DOTOFFLOAD_BASEPTRS21:%.*]] = alloca [9 x i8*], align 4 // CHECK7-NEXT: [[DOTOFFLOAD_PTRS22:%.*]] = alloca [9 x i8*], align 4 // CHECK7-NEXT: [[DOTOFFLOAD_MAPPERS23:%.*]] = alloca [9 x i8*], align 4 -// CHECK7-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [9 x i64], align 4 // CHECK7-NEXT: [[_TMP24:%.*]] = alloca i32, align 4 // CHECK7-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]]) // CHECK7-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 @@ -11024,97 +10845,80 @@ // CHECK7-NEXT: [[TMP97:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0 // CHECK7-NEXT: [[TMP98:%.*]] = bitcast i8** [[TMP97]] to i32* // CHECK7-NEXT: store i32 [[TMP88]], i32* [[TMP98]], align 4 -// CHECK7-NEXT: [[TMP99:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK7-NEXT: store i64 4, i64* [[TMP99]], align 4 -// CHECK7-NEXT: [[TMP100:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 0 -// CHECK7-NEXT: store i8* null, i8** [[TMP100]], align 4 -// CHECK7-NEXT: [[TMP101:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 1 -// CHECK7-NEXT: [[TMP102:%.*]] = bitcast i8** [[TMP101]] to [10 x float]** -// CHECK7-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP102]], align 4 -// CHECK7-NEXT: [[TMP103:%.*]] = getelementptr inbounds [9 x i8*], [9 x 
i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 1 -// CHECK7-NEXT: [[TMP104:%.*]] = bitcast i8** [[TMP103]] to [10 x float]** -// CHECK7-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP104]], align 4 -// CHECK7-NEXT: [[TMP105:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK7-NEXT: store i64 40, i64* [[TMP105]], align 4 -// CHECK7-NEXT: [[TMP106:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 1 -// CHECK7-NEXT: store i8* null, i8** [[TMP106]], align 4 -// CHECK7-NEXT: [[TMP107:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 2 +// CHECK7-NEXT: [[TMP99:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 0 +// CHECK7-NEXT: store i8* null, i8** [[TMP99]], align 4 +// CHECK7-NEXT: [[TMP100:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 1 +// CHECK7-NEXT: [[TMP101:%.*]] = bitcast i8** [[TMP100]] to [10 x float]** +// CHECK7-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP101]], align 4 +// CHECK7-NEXT: [[TMP102:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 1 +// CHECK7-NEXT: [[TMP103:%.*]] = bitcast i8** [[TMP102]] to [10 x float]** +// CHECK7-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP103]], align 4 +// CHECK7-NEXT: [[TMP104:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 1 +// CHECK7-NEXT: store i8* null, i8** [[TMP104]], align 4 +// CHECK7-NEXT: [[TMP105:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 2 +// CHECK7-NEXT: [[TMP106:%.*]] = bitcast i8** [[TMP105]] to i32* +// CHECK7-NEXT: store i32 [[TMP1]], i32* [[TMP106]], align 4 +// CHECK7-NEXT: [[TMP107:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 2 // CHECK7-NEXT: [[TMP108:%.*]] = bitcast i8** [[TMP107]] to i32* // CHECK7-NEXT: store i32 [[TMP1]], i32* [[TMP108]], align 4 -// CHECK7-NEXT: [[TMP109:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 2 -// CHECK7-NEXT: [[TMP110:%.*]] = bitcast i8** [[TMP109]] to i32* -// CHECK7-NEXT: store i32 [[TMP1]], i32* [[TMP110]], align 4 -// CHECK7-NEXT: [[TMP111:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK7-NEXT: store i64 4, i64* [[TMP111]], align 4 -// CHECK7-NEXT: [[TMP112:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 2 -// CHECK7-NEXT: store i8* null, i8** [[TMP112]], align 4 -// CHECK7-NEXT: [[TMP113:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 3 -// CHECK7-NEXT: [[TMP114:%.*]] = bitcast i8** [[TMP113]] to float** -// CHECK7-NEXT: store float* [[VLA]], float** [[TMP114]], align 4 -// CHECK7-NEXT: [[TMP115:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 3 -// CHECK7-NEXT: [[TMP116:%.*]] = bitcast i8** [[TMP115]] to float** -// CHECK7-NEXT: store float* [[VLA]], float** [[TMP116]], align 4 -// CHECK7-NEXT: [[TMP117:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK7-NEXT: store i64 [[TMP91]], i64* [[TMP117]], align 4 -// CHECK7-NEXT: [[TMP118:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 3 -// CHECK7-NEXT: store i8* null, i8** [[TMP118]], align 4 -// CHECK7-NEXT: [[TMP119:%.*]] = getelementptr inbounds [9 x 
i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 4 -// CHECK7-NEXT: [[TMP120:%.*]] = bitcast i8** [[TMP119]] to [5 x [10 x double]]** -// CHECK7-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP120]], align 4 -// CHECK7-NEXT: [[TMP121:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 4 -// CHECK7-NEXT: [[TMP122:%.*]] = bitcast i8** [[TMP121]] to [5 x [10 x double]]** -// CHECK7-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP122]], align 4 -// CHECK7-NEXT: [[TMP123:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK7-NEXT: store i64 400, i64* [[TMP123]], align 4 -// CHECK7-NEXT: [[TMP124:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 4 +// CHECK7-NEXT: [[TMP109:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 2 +// CHECK7-NEXT: store i8* null, i8** [[TMP109]], align 4 +// CHECK7-NEXT: [[TMP110:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 3 +// CHECK7-NEXT: [[TMP111:%.*]] = bitcast i8** [[TMP110]] to float** +// CHECK7-NEXT: store float* [[VLA]], float** [[TMP111]], align 4 +// CHECK7-NEXT: [[TMP112:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 3 +// CHECK7-NEXT: [[TMP113:%.*]] = bitcast i8** [[TMP112]] to float** +// CHECK7-NEXT: store float* [[VLA]], float** [[TMP113]], align 4 +// CHECK7-NEXT: store i64 [[TMP91]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_sizes.10, i32 0, i32 3), align 4 +// CHECK7-NEXT: [[TMP114:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 3 +// CHECK7-NEXT: store i8* null, i8** [[TMP114]], align 4 +// CHECK7-NEXT: [[TMP115:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 4 +// CHECK7-NEXT: [[TMP116:%.*]] = bitcast i8** [[TMP115]] to [5 x [10 x double]]** +// CHECK7-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP116]], align 4 +// CHECK7-NEXT: [[TMP117:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 4 +// CHECK7-NEXT: [[TMP118:%.*]] = bitcast i8** [[TMP117]] to [5 x [10 x double]]** +// CHECK7-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP118]], align 4 +// CHECK7-NEXT: [[TMP119:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 4 +// CHECK7-NEXT: store i8* null, i8** [[TMP119]], align 4 +// CHECK7-NEXT: [[TMP120:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 5 +// CHECK7-NEXT: [[TMP121:%.*]] = bitcast i8** [[TMP120]] to i32* +// CHECK7-NEXT: store i32 5, i32* [[TMP121]], align 4 +// CHECK7-NEXT: [[TMP122:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 5 +// CHECK7-NEXT: [[TMP123:%.*]] = bitcast i8** [[TMP122]] to i32* +// CHECK7-NEXT: store i32 5, i32* [[TMP123]], align 4 +// CHECK7-NEXT: [[TMP124:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 5 // CHECK7-NEXT: store i8* null, i8** [[TMP124]], align 4 -// CHECK7-NEXT: [[TMP125:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 5 +// CHECK7-NEXT: [[TMP125:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 6 // CHECK7-NEXT: [[TMP126:%.*]] = bitcast i8** [[TMP125]] to i32* -// CHECK7-NEXT: store i32 5, i32* 
[[TMP126]], align 4 -// CHECK7-NEXT: [[TMP127:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 5 +// CHECK7-NEXT: store i32 [[TMP3]], i32* [[TMP126]], align 4 +// CHECK7-NEXT: [[TMP127:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 6 // CHECK7-NEXT: [[TMP128:%.*]] = bitcast i8** [[TMP127]] to i32* -// CHECK7-NEXT: store i32 5, i32* [[TMP128]], align 4 -// CHECK7-NEXT: [[TMP129:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5 -// CHECK7-NEXT: store i64 4, i64* [[TMP129]], align 4 -// CHECK7-NEXT: [[TMP130:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 5 -// CHECK7-NEXT: store i8* null, i8** [[TMP130]], align 4 -// CHECK7-NEXT: [[TMP131:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 6 -// CHECK7-NEXT: [[TMP132:%.*]] = bitcast i8** [[TMP131]] to i32* -// CHECK7-NEXT: store i32 [[TMP3]], i32* [[TMP132]], align 4 -// CHECK7-NEXT: [[TMP133:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 6 -// CHECK7-NEXT: [[TMP134:%.*]] = bitcast i8** [[TMP133]] to i32* -// CHECK7-NEXT: store i32 [[TMP3]], i32* [[TMP134]], align 4 -// CHECK7-NEXT: [[TMP135:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6 -// CHECK7-NEXT: store i64 4, i64* [[TMP135]], align 4 -// CHECK7-NEXT: [[TMP136:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 6 -// CHECK7-NEXT: store i8* null, i8** [[TMP136]], align 4 -// CHECK7-NEXT: [[TMP137:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 7 -// CHECK7-NEXT: [[TMP138:%.*]] = bitcast i8** [[TMP137]] to double** -// CHECK7-NEXT: store double* [[VLA1]], double** [[TMP138]], align 4 -// CHECK7-NEXT: [[TMP139:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 7 -// CHECK7-NEXT: [[TMP140:%.*]] = bitcast i8** [[TMP139]] to double** -// CHECK7-NEXT: store double* [[VLA1]], double** [[TMP140]], align 4 -// CHECK7-NEXT: [[TMP141:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7 -// CHECK7-NEXT: store i64 [[TMP94]], i64* [[TMP141]], align 4 -// CHECK7-NEXT: [[TMP142:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 7 -// CHECK7-NEXT: store i8* null, i8** [[TMP142]], align 4 -// CHECK7-NEXT: [[TMP143:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 8 -// CHECK7-NEXT: [[TMP144:%.*]] = bitcast i8** [[TMP143]] to %struct.TT** -// CHECK7-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP144]], align 4 -// CHECK7-NEXT: [[TMP145:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 8 -// CHECK7-NEXT: [[TMP146:%.*]] = bitcast i8** [[TMP145]] to %struct.TT** -// CHECK7-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP146]], align 4 -// CHECK7-NEXT: [[TMP147:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 8 -// CHECK7-NEXT: store i64 12, i64* [[TMP147]], align 4 -// CHECK7-NEXT: [[TMP148:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 8 -// CHECK7-NEXT: store i8* null, i8** [[TMP148]], align 4 -// CHECK7-NEXT: [[TMP149:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0 -// CHECK7-NEXT: [[TMP150:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* 
[[DOTOFFLOAD_PTRS22]], i32 0, i32 0 -// CHECK7-NEXT: [[TMP151:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 +// CHECK7-NEXT: store i32 [[TMP3]], i32* [[TMP128]], align 4 +// CHECK7-NEXT: [[TMP129:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 6 +// CHECK7-NEXT: store i8* null, i8** [[TMP129]], align 4 +// CHECK7-NEXT: [[TMP130:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 7 +// CHECK7-NEXT: [[TMP131:%.*]] = bitcast i8** [[TMP130]] to double** +// CHECK7-NEXT: store double* [[VLA1]], double** [[TMP131]], align 4 +// CHECK7-NEXT: [[TMP132:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 7 +// CHECK7-NEXT: [[TMP133:%.*]] = bitcast i8** [[TMP132]] to double** +// CHECK7-NEXT: store double* [[VLA1]], double** [[TMP133]], align 4 +// CHECK7-NEXT: store i64 [[TMP94]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_sizes.10, i32 0, i32 7), align 4 +// CHECK7-NEXT: [[TMP134:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 7 +// CHECK7-NEXT: store i8* null, i8** [[TMP134]], align 4 +// CHECK7-NEXT: [[TMP135:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 8 +// CHECK7-NEXT: [[TMP136:%.*]] = bitcast i8** [[TMP135]] to %struct.TT** +// CHECK7-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP136]], align 4 +// CHECK7-NEXT: [[TMP137:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 8 +// CHECK7-NEXT: [[TMP138:%.*]] = bitcast i8** [[TMP137]] to %struct.TT** +// CHECK7-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP138]], align 4 +// CHECK7-NEXT: [[TMP139:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 8 +// CHECK7-NEXT: store i8* null, i8** [[TMP139]], align 4 +// CHECK7-NEXT: [[TMP140:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0 +// CHECK7-NEXT: [[TMP141:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0 // CHECK7-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK7-NEXT: [[TMP152:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142.region_id, i32 9, i8** [[TMP149]], i8** [[TMP150]], i64* [[TMP151]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) -// CHECK7-NEXT: [[TMP153:%.*]] = icmp ne i32 [[TMP152]], 0 -// CHECK7-NEXT: br i1 [[TMP153]], label [[OMP_OFFLOAD_FAILED25:%.*]], label [[OMP_OFFLOAD_CONT26:%.*]] +// CHECK7-NEXT: [[TMP142:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142.region_id, i32 9, i8** [[TMP140]], i8** [[TMP141]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_sizes.10, i32 0, i32 0), i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_maptypes.11, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK7-NEXT: [[TMP143:%.*]] = icmp ne i32 [[TMP142]], 0 +// CHECK7-NEXT: br i1 [[TMP143]], label [[OMP_OFFLOAD_FAILED25:%.*]], label [[OMP_OFFLOAD_CONT26:%.*]] // CHECK7: omp_offload.failed25: // CHECK7-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142(i32 [[TMP88]], [10 x float]* [[B]], i32 [[TMP1]], float* 
[[VLA]], [5 x [10 x double]]* [[C]], i32 5, i32 [[TMP3]], double* [[VLA1]], %struct.TT* [[D]]) #[[ATTR4]] // CHECK7-NEXT: br label [[OMP_OFFLOAD_CONT26]] @@ -11124,10 +10928,10 @@ // CHECK7-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142(i32 [[TMP88]], [10 x float]* [[B]], i32 [[TMP1]], float* [[VLA]], [5 x [10 x double]]* [[C]], i32 5, i32 [[TMP3]], double* [[VLA1]], %struct.TT* [[D]]) #[[ATTR4]] // CHECK7-NEXT: br label [[OMP_IF_END28]] // CHECK7: omp_if.end28: -// CHECK7-NEXT: [[TMP154:%.*]] = load i32, i32* [[A]], align 4 -// CHECK7-NEXT: [[TMP155:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK7-NEXT: call void @llvm.stackrestore(i8* [[TMP155]]) -// CHECK7-NEXT: ret i32 [[TMP154]] +// CHECK7-NEXT: [[TMP144:%.*]] = load i32, i32* [[A]], align 4 +// CHECK7-NEXT: [[TMP145:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK7-NEXT: call void @llvm.stackrestore(i8* [[TMP145]]) +// CHECK7-NEXT: ret i32 [[TMP144]] // // // CHECK7-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l97 @@ -11818,7 +11622,6 @@ // CHECK7-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [6 x i8*], align 4 // CHECK7-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [6 x i8*], align 4 // CHECK7-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [6 x i8*], align 4 -// CHECK7-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [6 x i64], align 4 // CHECK7-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK7-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 // CHECK7-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 @@ -11859,67 +11662,55 @@ // CHECK7-NEXT: [[TMP15:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK7-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to double** // CHECK7-NEXT: store double* [[A]], double** [[TMP16]], align 4 -// CHECK7-NEXT: [[TMP17:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK7-NEXT: store i64 8, i64* [[TMP17]], align 4 -// CHECK7-NEXT: [[TMP18:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK7-NEXT: store i8* null, i8** [[TMP18]], align 4 -// CHECK7-NEXT: [[TMP19:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK7-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i32* -// CHECK7-NEXT: store i32 [[TMP6]], i32* [[TMP20]], align 4 -// CHECK7-NEXT: [[TMP21:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK7-NEXT: [[TMP22:%.*]] = bitcast i8** [[TMP21]] to i32* -// CHECK7-NEXT: store i32 [[TMP6]], i32* [[TMP22]], align 4 -// CHECK7-NEXT: [[TMP23:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK7-NEXT: store i64 4, i64* [[TMP23]], align 4 -// CHECK7-NEXT: [[TMP24:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK7-NEXT: store i8* null, i8** [[TMP24]], align 4 -// CHECK7-NEXT: [[TMP25:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK7-NEXT: [[TMP17:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK7-NEXT: store i8* null, i8** [[TMP17]], align 4 +// CHECK7-NEXT: [[TMP18:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK7-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32* +// CHECK7-NEXT: store i32 [[TMP6]], i32* [[TMP19]], align 4 +// CHECK7-NEXT: [[TMP20:%.*]] = getelementptr 
inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK7-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32* +// CHECK7-NEXT: store i32 [[TMP6]], i32* [[TMP21]], align 4 +// CHECK7-NEXT: [[TMP22:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK7-NEXT: store i8* null, i8** [[TMP22]], align 4 +// CHECK7-NEXT: [[TMP23:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK7-NEXT: [[TMP24:%.*]] = bitcast i8** [[TMP23]] to i32* +// CHECK7-NEXT: store i32 2, i32* [[TMP24]], align 4 +// CHECK7-NEXT: [[TMP25:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK7-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i32* // CHECK7-NEXT: store i32 2, i32* [[TMP26]], align 4 -// CHECK7-NEXT: [[TMP27:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK7-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i32* -// CHECK7-NEXT: store i32 2, i32* [[TMP28]], align 4 -// CHECK7-NEXT: [[TMP29:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK7-NEXT: store i64 4, i64* [[TMP29]], align 4 -// CHECK7-NEXT: [[TMP30:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK7-NEXT: store i8* null, i8** [[TMP30]], align 4 -// CHECK7-NEXT: [[TMP31:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK7-NEXT: [[TMP32:%.*]] = bitcast i8** [[TMP31]] to i32* -// CHECK7-NEXT: store i32 [[TMP1]], i32* [[TMP32]], align 4 -// CHECK7-NEXT: [[TMP33:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK7-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i32* -// CHECK7-NEXT: store i32 [[TMP1]], i32* [[TMP34]], align 4 -// CHECK7-NEXT: [[TMP35:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK7-NEXT: store i64 4, i64* [[TMP35]], align 4 -// CHECK7-NEXT: [[TMP36:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 -// CHECK7-NEXT: store i8* null, i8** [[TMP36]], align 4 -// CHECK7-NEXT: [[TMP37:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK7-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i16** -// CHECK7-NEXT: store i16* [[VLA]], i16** [[TMP38]], align 4 -// CHECK7-NEXT: [[TMP39:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK7-NEXT: [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i16** -// CHECK7-NEXT: store i16* [[VLA]], i16** [[TMP40]], align 4 -// CHECK7-NEXT: [[TMP41:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK7-NEXT: store i64 [[TMP12]], i64* [[TMP41]], align 4 -// CHECK7-NEXT: [[TMP42:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 +// CHECK7-NEXT: [[TMP27:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK7-NEXT: store i8* null, i8** [[TMP27]], align 4 +// CHECK7-NEXT: [[TMP28:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK7-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i32* +// CHECK7-NEXT: store i32 [[TMP1]], i32* [[TMP29]], align 4 +// CHECK7-NEXT: [[TMP30:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK7-NEXT: [[TMP31:%.*]] = bitcast i8** 
[[TMP30]] to i32* +// CHECK7-NEXT: store i32 [[TMP1]], i32* [[TMP31]], align 4 +// CHECK7-NEXT: [[TMP32:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 +// CHECK7-NEXT: store i8* null, i8** [[TMP32]], align 4 +// CHECK7-NEXT: [[TMP33:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK7-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i16** +// CHECK7-NEXT: store i16* [[VLA]], i16** [[TMP34]], align 4 +// CHECK7-NEXT: [[TMP35:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK7-NEXT: [[TMP36:%.*]] = bitcast i8** [[TMP35]] to i16** +// CHECK7-NEXT: store i16* [[VLA]], i16** [[TMP36]], align 4 +// CHECK7-NEXT: store i64 [[TMP12]], i64* getelementptr inbounds ([6 x i64], [6 x i64]* @.offload_sizes.13, i32 0, i32 4), align 4 +// CHECK7-NEXT: [[TMP37:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 +// CHECK7-NEXT: store i8* null, i8** [[TMP37]], align 4 +// CHECK7-NEXT: [[TMP38:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 5 +// CHECK7-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i32* +// CHECK7-NEXT: store i32 [[TMP8]], i32* [[TMP39]], align 4 +// CHECK7-NEXT: [[TMP40:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 5 +// CHECK7-NEXT: [[TMP41:%.*]] = bitcast i8** [[TMP40]] to i32* +// CHECK7-NEXT: store i32 [[TMP8]], i32* [[TMP41]], align 4 +// CHECK7-NEXT: [[TMP42:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 5 // CHECK7-NEXT: store i8* null, i8** [[TMP42]], align 4 -// CHECK7-NEXT: [[TMP43:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 5 -// CHECK7-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32* -// CHECK7-NEXT: store i32 [[TMP8]], i32* [[TMP44]], align 4 -// CHECK7-NEXT: [[TMP45:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 5 -// CHECK7-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32* -// CHECK7-NEXT: store i32 [[TMP8]], i32* [[TMP46]], align 4 -// CHECK7-NEXT: [[TMP47:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5 -// CHECK7-NEXT: store i64 1, i64* [[TMP47]], align 4 -// CHECK7-NEXT: [[TMP48:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 5 -// CHECK7-NEXT: store i8* null, i8** [[TMP48]], align 4 -// CHECK7-NEXT: [[TMP49:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK7-NEXT: [[TMP50:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK7-NEXT: [[TMP51:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 +// CHECK7-NEXT: [[TMP43:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK7-NEXT: [[TMP44:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK7-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK7-NEXT: [[TMP52:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l215.region_id, i32 6, i8** [[TMP49]], i8** [[TMP50]], i64* [[TMP51]], i64* getelementptr inbounds ([6 x i64], [6 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) -// 
CHECK7-NEXT: [[TMP53:%.*]] = icmp ne i32 [[TMP52]], 0 -// CHECK7-NEXT: br i1 [[TMP53]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK7-NEXT: [[TMP45:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l215.region_id, i32 6, i8** [[TMP43]], i8** [[TMP44]], i64* getelementptr inbounds ([6 x i64], [6 x i64]* @.offload_sizes.13, i32 0, i32 0), i64* getelementptr inbounds ([6 x i64], [6 x i64]* @.offload_maptypes.14, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK7-NEXT: [[TMP46:%.*]] = icmp ne i32 [[TMP45]], 0 +// CHECK7-NEXT: br i1 [[TMP46]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK7: omp_offload.failed: // CHECK7-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l215(%struct.S1* [[THIS1]], i32 [[TMP6]], i32 2, i32 [[TMP1]], i16* [[VLA]], i32 [[TMP8]]) #[[ATTR4]] // CHECK7-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -11929,15 +11720,15 @@ // CHECK7-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l215(%struct.S1* [[THIS1]], i32 [[TMP6]], i32 2, i32 [[TMP1]], i16* [[VLA]], i32 [[TMP8]]) #[[ATTR4]] // CHECK7-NEXT: br label [[OMP_IF_END]] // CHECK7: omp_if.end: -// CHECK7-NEXT: [[TMP54:%.*]] = mul nsw i32 1, [[TMP1]] -// CHECK7-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP54]] +// CHECK7-NEXT: [[TMP47:%.*]] = mul nsw i32 1, [[TMP1]] +// CHECK7-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP47]] // CHECK7-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1 -// CHECK7-NEXT: [[TMP55:%.*]] = load i16, i16* [[ARRAYIDX4]], align 2 -// CHECK7-NEXT: [[CONV5:%.*]] = sext i16 [[TMP55]] to i32 -// CHECK7-NEXT: [[TMP56:%.*]] = load i32, i32* [[B]], align 4 -// CHECK7-NEXT: [[ADD6:%.*]] = add nsw i32 [[CONV5]], [[TMP56]] -// CHECK7-NEXT: [[TMP57:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK7-NEXT: call void @llvm.stackrestore(i8* [[TMP57]]) +// CHECK7-NEXT: [[TMP48:%.*]] = load i16, i16* [[ARRAYIDX4]], align 2 +// CHECK7-NEXT: [[CONV5:%.*]] = sext i16 [[TMP48]] to i32 +// CHECK7-NEXT: [[TMP49:%.*]] = load i32, i32* [[B]], align 4 +// CHECK7-NEXT: [[ADD6:%.*]] = add nsw i32 [[CONV5]], [[TMP49]] +// CHECK7-NEXT: [[TMP50:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK7-NEXT: call void @llvm.stackrestore(i8* [[TMP50]]) // CHECK7-NEXT: ret i32 [[ADD6]] // // @@ -12040,7 +11831,7 @@ // CHECK7-NEXT: [[ADD6:%.*]] = add i32 [[TMP40]], 1 // CHECK7-NEXT: [[TMP41:%.*]] = zext i32 [[ADD6]] to i64 // CHECK7-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP41]]) -// CHECK7-NEXT: [[TMP42:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l197.region_id, i32 5, i8** [[TMP34]], i8** [[TMP35]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.14, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.15, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK7-NEXT: [[TMP42:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l197.region_id, i32 5, i8** [[TMP34]], i8** [[TMP35]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.16, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.17, i32 0, i32 
0), i8** null, i8** null, i32 0, i32 1) // CHECK7-NEXT: [[TMP43:%.*]] = icmp ne i32 [[TMP42]], 0 // CHECK7-NEXT: br i1 [[TMP43]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK7: omp_offload.failed: @@ -12110,7 +11901,7 @@ // CHECK7-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK7-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK7-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK7-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l180.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.17, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.18, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK7-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l180.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.19, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.20, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) // CHECK7-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0 // CHECK7-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK7: omp_offload.failed: @@ -12157,11 +11948,11 @@ // CHECK7-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8 // CHECK7-NEXT: store i8 [[FROMBOOL]], i8* [[CONV3]], align 1 // CHECK7-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK7-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*, i32)* @.omp_outlined..11 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], i16* [[TMP3]], i32 [[TMP7]]) +// CHECK7-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*, i32)* @.omp_outlined..12 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], i16* [[TMP3]], i32 [[TMP7]]) // CHECK7-NEXT: ret void // // -// CHECK7-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK7-LABEL: define {{[^@]+}}@.omp_outlined..12 // CHECK7-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i32 noundef [[B:%.*]], i32 noundef [[VLA:%.*]], i32 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] { // CHECK7-NEXT: entry: // CHECK7-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -12335,11 +12126,11 @@ // CHECK7-NEXT: [[CONV3:%.*]] = bitcast i32* [[AAA_CASTED]] to i8* // CHECK7-NEXT: store i8 [[TMP7]], i8* [[CONV3]], align 1 // CHECK7-NEXT: [[TMP8:%.*]] = load i32, i32* [[AAA_CASTED]], align 4 -// CHECK7-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, [10 x i32]*)* @.omp_outlined..13 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], i32 [[TMP8]], [10 x i32]* [[TMP0]]) +// CHECK7-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, [10 x i32]*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], i32 [[TMP8]], [10 x i32]* [[TMP0]]) // CHECK7-NEXT: ret void // // -// CHECK7-LABEL: define {{[^@]+}}@.omp_outlined..13 +// CHECK7-LABEL: define {{[^@]+}}@.omp_outlined..15 // CHECK7-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[N:%.*]], i32 noundef [[AA:%.*]], i32 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] { // CHECK7-NEXT: entry: // CHECK7-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -12497,11 +12288,11 @@ // CHECK7-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16* // CHECK7-NEXT: store i16 [[TMP3]], i16* [[CONV1]], align 2 // CHECK7-NEXT: [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4 -// CHECK7-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]]) +// CHECK7-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]]) // CHECK7-NEXT: ret void // // -// CHECK7-LABEL: define {{[^@]+}}@.omp_outlined..16 +// CHECK7-LABEL: define {{[^@]+}}@.omp_outlined..18 // CHECK7-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] { // CHECK7-NEXT: entry: // CHECK7-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -12632,7 +12423,6 @@ // CHECK8-NEXT: [[DOTOFFLOAD_BASEPTRS21:%.*]] = alloca [9 x i8*], align 4 // CHECK8-NEXT: [[DOTOFFLOAD_PTRS22:%.*]] = alloca [9 x i8*], align 4 // CHECK8-NEXT: [[DOTOFFLOAD_MAPPERS23:%.*]] = alloca [9 x i8*], align 4 -// CHECK8-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [9 x i64], align 4 // CHECK8-NEXT: [[_TMP24:%.*]] = alloca i32, align 4 // CHECK8-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]]) // CHECK8-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 @@ -12806,97 +12596,80 @@ // CHECK8-NEXT: [[TMP97:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0 // CHECK8-NEXT: [[TMP98:%.*]] = bitcast i8** [[TMP97]] to i32* // CHECK8-NEXT: store i32 [[TMP88]], i32* [[TMP98]], align 4 -// CHECK8-NEXT: [[TMP99:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK8-NEXT: store i64 4, i64* [[TMP99]], align 4 -// CHECK8-NEXT: [[TMP100:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 0 -// CHECK8-NEXT: store i8* null, i8** [[TMP100]], align 4 -// 
CHECK8-NEXT: [[TMP101:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 1 -// CHECK8-NEXT: [[TMP102:%.*]] = bitcast i8** [[TMP101]] to [10 x float]** -// CHECK8-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP102]], align 4 -// CHECK8-NEXT: [[TMP103:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 1 -// CHECK8-NEXT: [[TMP104:%.*]] = bitcast i8** [[TMP103]] to [10 x float]** -// CHECK8-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP104]], align 4 -// CHECK8-NEXT: [[TMP105:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK8-NEXT: store i64 40, i64* [[TMP105]], align 4 -// CHECK8-NEXT: [[TMP106:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 1 -// CHECK8-NEXT: store i8* null, i8** [[TMP106]], align 4 -// CHECK8-NEXT: [[TMP107:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 2 +// CHECK8-NEXT: [[TMP99:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 0 +// CHECK8-NEXT: store i8* null, i8** [[TMP99]], align 4 +// CHECK8-NEXT: [[TMP100:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 1 +// CHECK8-NEXT: [[TMP101:%.*]] = bitcast i8** [[TMP100]] to [10 x float]** +// CHECK8-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP101]], align 4 +// CHECK8-NEXT: [[TMP102:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 1 +// CHECK8-NEXT: [[TMP103:%.*]] = bitcast i8** [[TMP102]] to [10 x float]** +// CHECK8-NEXT: store [10 x float]* [[B]], [10 x float]** [[TMP103]], align 4 +// CHECK8-NEXT: [[TMP104:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 1 +// CHECK8-NEXT: store i8* null, i8** [[TMP104]], align 4 +// CHECK8-NEXT: [[TMP105:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 2 +// CHECK8-NEXT: [[TMP106:%.*]] = bitcast i8** [[TMP105]] to i32* +// CHECK8-NEXT: store i32 [[TMP1]], i32* [[TMP106]], align 4 +// CHECK8-NEXT: [[TMP107:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 2 // CHECK8-NEXT: [[TMP108:%.*]] = bitcast i8** [[TMP107]] to i32* // CHECK8-NEXT: store i32 [[TMP1]], i32* [[TMP108]], align 4 -// CHECK8-NEXT: [[TMP109:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 2 -// CHECK8-NEXT: [[TMP110:%.*]] = bitcast i8** [[TMP109]] to i32* -// CHECK8-NEXT: store i32 [[TMP1]], i32* [[TMP110]], align 4 -// CHECK8-NEXT: [[TMP111:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK8-NEXT: store i64 4, i64* [[TMP111]], align 4 -// CHECK8-NEXT: [[TMP112:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 2 -// CHECK8-NEXT: store i8* null, i8** [[TMP112]], align 4 -// CHECK8-NEXT: [[TMP113:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 3 -// CHECK8-NEXT: [[TMP114:%.*]] = bitcast i8** [[TMP113]] to float** -// CHECK8-NEXT: store float* [[VLA]], float** [[TMP114]], align 4 -// CHECK8-NEXT: [[TMP115:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 3 -// CHECK8-NEXT: [[TMP116:%.*]] = bitcast i8** [[TMP115]] to float** -// CHECK8-NEXT: store float* [[VLA]], float** [[TMP116]], align 4 -// CHECK8-NEXT: [[TMP117:%.*]] = getelementptr inbounds [9 x i64], [9 x 
i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK8-NEXT: store i64 [[TMP91]], i64* [[TMP117]], align 4 -// CHECK8-NEXT: [[TMP118:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 3 -// CHECK8-NEXT: store i8* null, i8** [[TMP118]], align 4 -// CHECK8-NEXT: [[TMP119:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 4 -// CHECK8-NEXT: [[TMP120:%.*]] = bitcast i8** [[TMP119]] to [5 x [10 x double]]** -// CHECK8-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP120]], align 4 -// CHECK8-NEXT: [[TMP121:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 4 -// CHECK8-NEXT: [[TMP122:%.*]] = bitcast i8** [[TMP121]] to [5 x [10 x double]]** -// CHECK8-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP122]], align 4 -// CHECK8-NEXT: [[TMP123:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK8-NEXT: store i64 400, i64* [[TMP123]], align 4 -// CHECK8-NEXT: [[TMP124:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 4 +// CHECK8-NEXT: [[TMP109:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 2 +// CHECK8-NEXT: store i8* null, i8** [[TMP109]], align 4 +// CHECK8-NEXT: [[TMP110:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 3 +// CHECK8-NEXT: [[TMP111:%.*]] = bitcast i8** [[TMP110]] to float** +// CHECK8-NEXT: store float* [[VLA]], float** [[TMP111]], align 4 +// CHECK8-NEXT: [[TMP112:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 3 +// CHECK8-NEXT: [[TMP113:%.*]] = bitcast i8** [[TMP112]] to float** +// CHECK8-NEXT: store float* [[VLA]], float** [[TMP113]], align 4 +// CHECK8-NEXT: store i64 [[TMP91]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_sizes.10, i32 0, i32 3), align 4 +// CHECK8-NEXT: [[TMP114:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 3 +// CHECK8-NEXT: store i8* null, i8** [[TMP114]], align 4 +// CHECK8-NEXT: [[TMP115:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 4 +// CHECK8-NEXT: [[TMP116:%.*]] = bitcast i8** [[TMP115]] to [5 x [10 x double]]** +// CHECK8-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP116]], align 4 +// CHECK8-NEXT: [[TMP117:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 4 +// CHECK8-NEXT: [[TMP118:%.*]] = bitcast i8** [[TMP117]] to [5 x [10 x double]]** +// CHECK8-NEXT: store [5 x [10 x double]]* [[C]], [5 x [10 x double]]** [[TMP118]], align 4 +// CHECK8-NEXT: [[TMP119:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 4 +// CHECK8-NEXT: store i8* null, i8** [[TMP119]], align 4 +// CHECK8-NEXT: [[TMP120:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 5 +// CHECK8-NEXT: [[TMP121:%.*]] = bitcast i8** [[TMP120]] to i32* +// CHECK8-NEXT: store i32 5, i32* [[TMP121]], align 4 +// CHECK8-NEXT: [[TMP122:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 5 +// CHECK8-NEXT: [[TMP123:%.*]] = bitcast i8** [[TMP122]] to i32* +// CHECK8-NEXT: store i32 5, i32* [[TMP123]], align 4 +// CHECK8-NEXT: [[TMP124:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 5 // CHECK8-NEXT: store i8* null, i8** [[TMP124]], 
align 4 -// CHECK8-NEXT: [[TMP125:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 5 +// CHECK8-NEXT: [[TMP125:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 6 // CHECK8-NEXT: [[TMP126:%.*]] = bitcast i8** [[TMP125]] to i32* -// CHECK8-NEXT: store i32 5, i32* [[TMP126]], align 4 -// CHECK8-NEXT: [[TMP127:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 5 +// CHECK8-NEXT: store i32 [[TMP3]], i32* [[TMP126]], align 4 +// CHECK8-NEXT: [[TMP127:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 6 // CHECK8-NEXT: [[TMP128:%.*]] = bitcast i8** [[TMP127]] to i32* -// CHECK8-NEXT: store i32 5, i32* [[TMP128]], align 4 -// CHECK8-NEXT: [[TMP129:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5 -// CHECK8-NEXT: store i64 4, i64* [[TMP129]], align 4 -// CHECK8-NEXT: [[TMP130:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 5 -// CHECK8-NEXT: store i8* null, i8** [[TMP130]], align 4 -// CHECK8-NEXT: [[TMP131:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 6 -// CHECK8-NEXT: [[TMP132:%.*]] = bitcast i8** [[TMP131]] to i32* -// CHECK8-NEXT: store i32 [[TMP3]], i32* [[TMP132]], align 4 -// CHECK8-NEXT: [[TMP133:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 6 -// CHECK8-NEXT: [[TMP134:%.*]] = bitcast i8** [[TMP133]] to i32* -// CHECK8-NEXT: store i32 [[TMP3]], i32* [[TMP134]], align 4 -// CHECK8-NEXT: [[TMP135:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6 -// CHECK8-NEXT: store i64 4, i64* [[TMP135]], align 4 -// CHECK8-NEXT: [[TMP136:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 6 -// CHECK8-NEXT: store i8* null, i8** [[TMP136]], align 4 -// CHECK8-NEXT: [[TMP137:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 7 -// CHECK8-NEXT: [[TMP138:%.*]] = bitcast i8** [[TMP137]] to double** -// CHECK8-NEXT: store double* [[VLA1]], double** [[TMP138]], align 4 -// CHECK8-NEXT: [[TMP139:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 7 -// CHECK8-NEXT: [[TMP140:%.*]] = bitcast i8** [[TMP139]] to double** -// CHECK8-NEXT: store double* [[VLA1]], double** [[TMP140]], align 4 -// CHECK8-NEXT: [[TMP141:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7 -// CHECK8-NEXT: store i64 [[TMP94]], i64* [[TMP141]], align 4 -// CHECK8-NEXT: [[TMP142:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 7 -// CHECK8-NEXT: store i8* null, i8** [[TMP142]], align 4 -// CHECK8-NEXT: [[TMP143:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 8 -// CHECK8-NEXT: [[TMP144:%.*]] = bitcast i8** [[TMP143]] to %struct.TT** -// CHECK8-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP144]], align 4 -// CHECK8-NEXT: [[TMP145:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 8 -// CHECK8-NEXT: [[TMP146:%.*]] = bitcast i8** [[TMP145]] to %struct.TT** -// CHECK8-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP146]], align 4 -// CHECK8-NEXT: [[TMP147:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 8 -// CHECK8-NEXT: store i64 12, i64* [[TMP147]], align 4 -// CHECK8-NEXT: 
[[TMP148:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 8 -// CHECK8-NEXT: store i8* null, i8** [[TMP148]], align 4 -// CHECK8-NEXT: [[TMP149:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0 -// CHECK8-NEXT: [[TMP150:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0 -// CHECK8-NEXT: [[TMP151:%.*]] = getelementptr inbounds [9 x i64], [9 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 +// CHECK8-NEXT: store i32 [[TMP3]], i32* [[TMP128]], align 4 +// CHECK8-NEXT: [[TMP129:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 6 +// CHECK8-NEXT: store i8* null, i8** [[TMP129]], align 4 +// CHECK8-NEXT: [[TMP130:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 7 +// CHECK8-NEXT: [[TMP131:%.*]] = bitcast i8** [[TMP130]] to double** +// CHECK8-NEXT: store double* [[VLA1]], double** [[TMP131]], align 4 +// CHECK8-NEXT: [[TMP132:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 7 +// CHECK8-NEXT: [[TMP133:%.*]] = bitcast i8** [[TMP132]] to double** +// CHECK8-NEXT: store double* [[VLA1]], double** [[TMP133]], align 4 +// CHECK8-NEXT: store i64 [[TMP94]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_sizes.10, i32 0, i32 7), align 4 +// CHECK8-NEXT: [[TMP134:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 7 +// CHECK8-NEXT: store i8* null, i8** [[TMP134]], align 4 +// CHECK8-NEXT: [[TMP135:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 8 +// CHECK8-NEXT: [[TMP136:%.*]] = bitcast i8** [[TMP135]] to %struct.TT** +// CHECK8-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP136]], align 4 +// CHECK8-NEXT: [[TMP137:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 8 +// CHECK8-NEXT: [[TMP138:%.*]] = bitcast i8** [[TMP137]] to %struct.TT** +// CHECK8-NEXT: store %struct.TT* [[D]], %struct.TT** [[TMP138]], align 4 +// CHECK8-NEXT: [[TMP139:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i32 0, i32 8 +// CHECK8-NEXT: store i8* null, i8** [[TMP139]], align 4 +// CHECK8-NEXT: [[TMP140:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0 +// CHECK8-NEXT: [[TMP141:%.*]] = getelementptr inbounds [9 x i8*], [9 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0 // CHECK8-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK8-NEXT: [[TMP152:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142.region_id, i32 9, i8** [[TMP149]], i8** [[TMP150]], i64* [[TMP151]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) -// CHECK8-NEXT: [[TMP153:%.*]] = icmp ne i32 [[TMP152]], 0 -// CHECK8-NEXT: br i1 [[TMP153]], label [[OMP_OFFLOAD_FAILED25:%.*]], label [[OMP_OFFLOAD_CONT26:%.*]] +// CHECK8-NEXT: [[TMP142:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142.region_id, i32 9, i8** [[TMP140]], i8** [[TMP141]], i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_sizes.10, i32 0, i32 0), i64* getelementptr inbounds ([9 x i64], [9 x i64]* @.offload_maptypes.11, i32 0, i32 0), i8** null, i8** 
null, i32 0, i32 1) +// CHECK8-NEXT: [[TMP143:%.*]] = icmp ne i32 [[TMP142]], 0 +// CHECK8-NEXT: br i1 [[TMP143]], label [[OMP_OFFLOAD_FAILED25:%.*]], label [[OMP_OFFLOAD_CONT26:%.*]] // CHECK8: omp_offload.failed25: // CHECK8-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142(i32 [[TMP88]], [10 x float]* [[B]], i32 [[TMP1]], float* [[VLA]], [5 x [10 x double]]* [[C]], i32 5, i32 [[TMP3]], double* [[VLA1]], %struct.TT* [[D]]) #[[ATTR4]] // CHECK8-NEXT: br label [[OMP_OFFLOAD_CONT26]] @@ -12906,10 +12679,10 @@ // CHECK8-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l142(i32 [[TMP88]], [10 x float]* [[B]], i32 [[TMP1]], float* [[VLA]], [5 x [10 x double]]* [[C]], i32 5, i32 [[TMP3]], double* [[VLA1]], %struct.TT* [[D]]) #[[ATTR4]] // CHECK8-NEXT: br label [[OMP_IF_END28]] // CHECK8: omp_if.end28: -// CHECK8-NEXT: [[TMP154:%.*]] = load i32, i32* [[A]], align 4 -// CHECK8-NEXT: [[TMP155:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK8-NEXT: call void @llvm.stackrestore(i8* [[TMP155]]) -// CHECK8-NEXT: ret i32 [[TMP154]] +// CHECK8-NEXT: [[TMP144:%.*]] = load i32, i32* [[A]], align 4 +// CHECK8-NEXT: [[TMP145:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK8-NEXT: call void @llvm.stackrestore(i8* [[TMP145]]) +// CHECK8-NEXT: ret i32 [[TMP144]] // // // CHECK8-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z3fooi_l97 @@ -13600,7 +13373,6 @@ // CHECK8-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [6 x i8*], align 4 // CHECK8-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [6 x i8*], align 4 // CHECK8-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [6 x i8*], align 4 -// CHECK8-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [6 x i64], align 4 // CHECK8-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK8-NEXT: store %struct.S1* [[THIS]], %struct.S1** [[THIS_ADDR]], align 4 // CHECK8-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 @@ -13641,67 +13413,55 @@ // CHECK8-NEXT: [[TMP15:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK8-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to double** // CHECK8-NEXT: store double* [[A]], double** [[TMP16]], align 4 -// CHECK8-NEXT: [[TMP17:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK8-NEXT: store i64 8, i64* [[TMP17]], align 4 -// CHECK8-NEXT: [[TMP18:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK8-NEXT: store i8* null, i8** [[TMP18]], align 4 -// CHECK8-NEXT: [[TMP19:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK8-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i32* -// CHECK8-NEXT: store i32 [[TMP6]], i32* [[TMP20]], align 4 -// CHECK8-NEXT: [[TMP21:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK8-NEXT: [[TMP22:%.*]] = bitcast i8** [[TMP21]] to i32* -// CHECK8-NEXT: store i32 [[TMP6]], i32* [[TMP22]], align 4 -// CHECK8-NEXT: [[TMP23:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK8-NEXT: store i64 4, i64* [[TMP23]], align 4 -// CHECK8-NEXT: [[TMP24:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK8-NEXT: store i8* null, i8** [[TMP24]], align 4 -// CHECK8-NEXT: [[TMP25:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK8-NEXT: [[TMP17:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, 
i32 0 +// CHECK8-NEXT: store i8* null, i8** [[TMP17]], align 4 +// CHECK8-NEXT: [[TMP18:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK8-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32* +// CHECK8-NEXT: store i32 [[TMP6]], i32* [[TMP19]], align 4 +// CHECK8-NEXT: [[TMP20:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK8-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32* +// CHECK8-NEXT: store i32 [[TMP6]], i32* [[TMP21]], align 4 +// CHECK8-NEXT: [[TMP22:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK8-NEXT: store i8* null, i8** [[TMP22]], align 4 +// CHECK8-NEXT: [[TMP23:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK8-NEXT: [[TMP24:%.*]] = bitcast i8** [[TMP23]] to i32* +// CHECK8-NEXT: store i32 2, i32* [[TMP24]], align 4 +// CHECK8-NEXT: [[TMP25:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK8-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i32* // CHECK8-NEXT: store i32 2, i32* [[TMP26]], align 4 -// CHECK8-NEXT: [[TMP27:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK8-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i32* -// CHECK8-NEXT: store i32 2, i32* [[TMP28]], align 4 -// CHECK8-NEXT: [[TMP29:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK8-NEXT: store i64 4, i64* [[TMP29]], align 4 -// CHECK8-NEXT: [[TMP30:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK8-NEXT: store i8* null, i8** [[TMP30]], align 4 -// CHECK8-NEXT: [[TMP31:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK8-NEXT: [[TMP32:%.*]] = bitcast i8** [[TMP31]] to i32* -// CHECK8-NEXT: store i32 [[TMP1]], i32* [[TMP32]], align 4 -// CHECK8-NEXT: [[TMP33:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK8-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i32* -// CHECK8-NEXT: store i32 [[TMP1]], i32* [[TMP34]], align 4 -// CHECK8-NEXT: [[TMP35:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK8-NEXT: store i64 4, i64* [[TMP35]], align 4 -// CHECK8-NEXT: [[TMP36:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 -// CHECK8-NEXT: store i8* null, i8** [[TMP36]], align 4 -// CHECK8-NEXT: [[TMP37:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK8-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i16** -// CHECK8-NEXT: store i16* [[VLA]], i16** [[TMP38]], align 4 -// CHECK8-NEXT: [[TMP39:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK8-NEXT: [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i16** -// CHECK8-NEXT: store i16* [[VLA]], i16** [[TMP40]], align 4 -// CHECK8-NEXT: [[TMP41:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK8-NEXT: store i64 [[TMP12]], i64* [[TMP41]], align 4 -// CHECK8-NEXT: [[TMP42:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 +// CHECK8-NEXT: [[TMP27:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK8-NEXT: store i8* null, i8** [[TMP27]], align 4 +// CHECK8-NEXT: [[TMP28:%.*]] = getelementptr 
inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK8-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i32* +// CHECK8-NEXT: store i32 [[TMP1]], i32* [[TMP29]], align 4 +// CHECK8-NEXT: [[TMP30:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK8-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i32* +// CHECK8-NEXT: store i32 [[TMP1]], i32* [[TMP31]], align 4 +// CHECK8-NEXT: [[TMP32:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 +// CHECK8-NEXT: store i8* null, i8** [[TMP32]], align 4 +// CHECK8-NEXT: [[TMP33:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK8-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i16** +// CHECK8-NEXT: store i16* [[VLA]], i16** [[TMP34]], align 4 +// CHECK8-NEXT: [[TMP35:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK8-NEXT: [[TMP36:%.*]] = bitcast i8** [[TMP35]] to i16** +// CHECK8-NEXT: store i16* [[VLA]], i16** [[TMP36]], align 4 +// CHECK8-NEXT: store i64 [[TMP12]], i64* getelementptr inbounds ([6 x i64], [6 x i64]* @.offload_sizes.13, i32 0, i32 4), align 4 +// CHECK8-NEXT: [[TMP37:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 +// CHECK8-NEXT: store i8* null, i8** [[TMP37]], align 4 +// CHECK8-NEXT: [[TMP38:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 5 +// CHECK8-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i32* +// CHECK8-NEXT: store i32 [[TMP8]], i32* [[TMP39]], align 4 +// CHECK8-NEXT: [[TMP40:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 5 +// CHECK8-NEXT: [[TMP41:%.*]] = bitcast i8** [[TMP40]] to i32* +// CHECK8-NEXT: store i32 [[TMP8]], i32* [[TMP41]], align 4 +// CHECK8-NEXT: [[TMP42:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 5 // CHECK8-NEXT: store i8* null, i8** [[TMP42]], align 4 -// CHECK8-NEXT: [[TMP43:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 5 -// CHECK8-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32* -// CHECK8-NEXT: store i32 [[TMP8]], i32* [[TMP44]], align 4 -// CHECK8-NEXT: [[TMP45:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 5 -// CHECK8-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32* -// CHECK8-NEXT: store i32 [[TMP8]], i32* [[TMP46]], align 4 -// CHECK8-NEXT: [[TMP47:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5 -// CHECK8-NEXT: store i64 1, i64* [[TMP47]], align 4 -// CHECK8-NEXT: [[TMP48:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 5 -// CHECK8-NEXT: store i8* null, i8** [[TMP48]], align 4 -// CHECK8-NEXT: [[TMP49:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK8-NEXT: [[TMP50:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK8-NEXT: [[TMP51:%.*]] = getelementptr inbounds [6 x i64], [6 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 +// CHECK8-NEXT: [[TMP43:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK8-NEXT: [[TMP44:%.*]] = getelementptr inbounds [6 x i8*], [6 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK8-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK8-NEXT: 
[[TMP52:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l215.region_id, i32 6, i8** [[TMP49]], i8** [[TMP50]], i64* [[TMP51]], i64* getelementptr inbounds ([6 x i64], [6 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) -// CHECK8-NEXT: [[TMP53:%.*]] = icmp ne i32 [[TMP52]], 0 -// CHECK8-NEXT: br i1 [[TMP53]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK8-NEXT: [[TMP45:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l215.region_id, i32 6, i8** [[TMP43]], i8** [[TMP44]], i64* getelementptr inbounds ([6 x i64], [6 x i64]* @.offload_sizes.13, i32 0, i32 0), i64* getelementptr inbounds ([6 x i64], [6 x i64]* @.offload_maptypes.14, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK8-NEXT: [[TMP46:%.*]] = icmp ne i32 [[TMP45]], 0 +// CHECK8-NEXT: br i1 [[TMP46]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK8: omp_offload.failed: // CHECK8-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l215(%struct.S1* [[THIS1]], i32 [[TMP6]], i32 2, i32 [[TMP1]], i16* [[VLA]], i32 [[TMP8]]) #[[ATTR4]] // CHECK8-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -13711,15 +13471,15 @@ // CHECK8-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2S12r1Ei_l215(%struct.S1* [[THIS1]], i32 [[TMP6]], i32 2, i32 [[TMP1]], i16* [[VLA]], i32 [[TMP8]]) #[[ATTR4]] // CHECK8-NEXT: br label [[OMP_IF_END]] // CHECK8: omp_if.end: -// CHECK8-NEXT: [[TMP54:%.*]] = mul nsw i32 1, [[TMP1]] -// CHECK8-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP54]] +// CHECK8-NEXT: [[TMP47:%.*]] = mul nsw i32 1, [[TMP1]] +// CHECK8-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i16, i16* [[VLA]], i32 [[TMP47]] // CHECK8-NEXT: [[ARRAYIDX4:%.*]] = getelementptr inbounds i16, i16* [[ARRAYIDX]], i32 1 -// CHECK8-NEXT: [[TMP55:%.*]] = load i16, i16* [[ARRAYIDX4]], align 2 -// CHECK8-NEXT: [[CONV5:%.*]] = sext i16 [[TMP55]] to i32 -// CHECK8-NEXT: [[TMP56:%.*]] = load i32, i32* [[B]], align 4 -// CHECK8-NEXT: [[ADD6:%.*]] = add nsw i32 [[CONV5]], [[TMP56]] -// CHECK8-NEXT: [[TMP57:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK8-NEXT: call void @llvm.stackrestore(i8* [[TMP57]]) +// CHECK8-NEXT: [[TMP48:%.*]] = load i16, i16* [[ARRAYIDX4]], align 2 +// CHECK8-NEXT: [[CONV5:%.*]] = sext i16 [[TMP48]] to i32 +// CHECK8-NEXT: [[TMP49:%.*]] = load i32, i32* [[B]], align 4 +// CHECK8-NEXT: [[ADD6:%.*]] = add nsw i32 [[CONV5]], [[TMP49]] +// CHECK8-NEXT: [[TMP50:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK8-NEXT: call void @llvm.stackrestore(i8* [[TMP50]]) // CHECK8-NEXT: ret i32 [[ADD6]] // // @@ -13822,7 +13582,7 @@ // CHECK8-NEXT: [[ADD6:%.*]] = add i32 [[TMP40]], 1 // CHECK8-NEXT: [[TMP41:%.*]] = zext i32 [[ADD6]] to i64 // CHECK8-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP41]]) -// CHECK8-NEXT: [[TMP42:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l197.region_id, i32 5, i8** [[TMP34]], i8** [[TMP35]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.14, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.15, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK8-NEXT: [[TMP42:%.*]] = call i32 
@__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZL7fstatici_l197.region_id, i32 5, i8** [[TMP34]], i8** [[TMP35]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes.16, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) // CHECK8-NEXT: [[TMP43:%.*]] = icmp ne i32 [[TMP42]], 0 // CHECK8-NEXT: br i1 [[TMP43]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK8: omp_offload.failed: @@ -13892,7 +13652,7 @@ // CHECK8-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK8-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK8-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK8-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l180.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.17, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.18, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK8-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z9ftemplateIiET_i_l180.region_id, i32 3, i8** [[TMP20]], i8** [[TMP21]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.19, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.20, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) // CHECK8-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0 // CHECK8-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK8: omp_offload.failed: @@ -13939,11 +13699,11 @@ // CHECK8-NEXT: [[FROMBOOL:%.*]] = zext i1 [[TOBOOL]] to i8 // CHECK8-NEXT: store i8 [[FROMBOOL]], i8* [[CONV3]], align 1 // CHECK8-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK8-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*, i32)* @.omp_outlined..11 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], i16* [[TMP3]], i32 [[TMP7]]) +// CHECK8-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, %struct.S1*, i32, i32, i32, i16*, i32)* @.omp_outlined..12 to void (i32*, i32*, ...)*), %struct.S1* [[TMP0]], i32 [[TMP5]], i32 [[TMP1]], i32 [[TMP2]], i16* [[TMP3]], i32 [[TMP7]]) // CHECK8-NEXT: ret void // // -// CHECK8-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK8-LABEL: define {{[^@]+}}@.omp_outlined..12 // CHECK8-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], %struct.S1* noundef [[THIS:%.*]], i32 noundef [[B:%.*]], i32 noundef [[VLA:%.*]], i32 noundef [[VLA1:%.*]], i16* noundef nonnull align 2 dereferenceable(2) [[C:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] { // CHECK8-NEXT: entry: // CHECK8-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -14117,11 +13877,11 @@ // CHECK8-NEXT: [[CONV3:%.*]] = bitcast i32* [[AAA_CASTED]] to i8* // CHECK8-NEXT: store i8 [[TMP7]], i8* [[CONV3]], align 1 // CHECK8-NEXT: [[TMP8:%.*]] = load i32, i32* [[AAA_CASTED]], align 4 -// CHECK8-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, [10 x i32]*)* @.omp_outlined..13 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], i32 [[TMP8]], [10 x i32]* [[TMP0]]) +// CHECK8-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32, i32, [10 x i32]*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], i32 [[TMP6]], i32 [[TMP8]], [10 x i32]* [[TMP0]]) // CHECK8-NEXT: ret void // // -// CHECK8-LABEL: define {{[^@]+}}@.omp_outlined..13 +// CHECK8-LABEL: define {{[^@]+}}@.omp_outlined..15 // CHECK8-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[N:%.*]], i32 noundef [[AA:%.*]], i32 noundef [[AAA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] { // CHECK8-NEXT: entry: // CHECK8-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -14279,11 +14039,11 @@ // CHECK8-NEXT: [[CONV1:%.*]] = bitcast i32* [[AA_CASTED]] to i16* // CHECK8-NEXT: store i16 [[TMP3]], i16* [[CONV1]], align 2 // CHECK8-NEXT: [[TMP4:%.*]] = load i32, i32* [[AA_CASTED]], align 4 -// CHECK8-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]]) +// CHECK8-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), i32 [[TMP2]], i32 [[TMP4]], [10 x i32]* [[TMP0]]) // CHECK8-NEXT: ret void // // -// CHECK8-LABEL: define {{[^@]+}}@.omp_outlined..16 +// CHECK8-LABEL: define {{[^@]+}}@.omp_outlined..18 // CHECK8-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[A:%.*]], i32 noundef [[AA:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[B:%.*]]) #[[ATTR3]] { // CHECK8-NEXT: entry: // CHECK8-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 diff --git a/clang/test/OpenMP/target_teams_distribute_simd_collapse_codegen.cpp b/clang/test/OpenMP/target_teams_distribute_simd_collapse_codegen.cpp --- a/clang/test/OpenMP/target_teams_distribute_simd_collapse_codegen.cpp +++ b/clang/test/OpenMP/target_teams_distribute_simd_collapse_codegen.cpp @@ -994,7 +994,6 @@ // CHECK9-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 8 // CHECK9-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 8 // CHECK9-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 8 -// CHECK9-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 8 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK9-NEXT: [[_TMP2:%.*]] = alloca i32, align 4 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 @@ -1031,74 +1030,64 @@ // CHECK9-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK9-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64* // CHECK9-NEXT: store i64 [[TMP7]], i64* [[TMP15]], align 8 -// CHECK9-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK9-NEXT: store i64 4, i64* [[TMP16]], align 8 -// CHECK9-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK9-NEXT: store i8* null, i8** [[TMP17]], align 8 -// CHECK9-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK9-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i64* -// CHECK9-NEXT: store i64 [[TMP9]], i64* [[TMP19]], align 8 -// CHECK9-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK9-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i64* -// CHECK9-NEXT: store i64 [[TMP9]], i64* [[TMP21]], align 8 -// CHECK9-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK9-NEXT: store i64 4, i64* [[TMP22]], align 8 -// CHECK9-NEXT: [[TMP23:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK9-NEXT: store i8* null, i8** [[TMP23]], align 8 -// CHECK9-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK9-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK9-NEXT: store i8* null, i8** [[TMP16]], align 8 +// CHECK9-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK9-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i64* +// CHECK9-NEXT: store i64 [[TMP9]], i64* [[TMP18]], align 8 +// CHECK9-NEXT: [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK9-NEXT: [[TMP20:%.*]] = bitcast i8** 
[[TMP19]] to i64* +// CHECK9-NEXT: store i64 [[TMP9]], i64* [[TMP20]], align 8 +// CHECK9-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK9-NEXT: store i8* null, i8** [[TMP21]], align 8 +// CHECK9-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK9-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i64* +// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP23]], align 8 +// CHECK9-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK9-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i64* // CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP25]], align 8 -// CHECK9-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK9-NEXT: [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i64* -// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP27]], align 8 -// CHECK9-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK9-NEXT: store i64 8, i64* [[TMP28]], align 8 -// CHECK9-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK9-NEXT: store i8* null, i8** [[TMP29]], align 8 -// CHECK9-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK9-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i64* -// CHECK9-NEXT: store i64 [[TMP3]], i64* [[TMP31]], align 8 -// CHECK9-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK9-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i64* -// CHECK9-NEXT: store i64 [[TMP3]], i64* [[TMP33]], align 8 -// CHECK9-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK9-NEXT: store i64 8, i64* [[TMP34]], align 8 -// CHECK9-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 -// CHECK9-NEXT: store i8* null, i8** [[TMP35]], align 8 -// CHECK9-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK9-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i32** -// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP37]], align 8 -// CHECK9-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK9-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i32** -// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP39]], align 8 -// CHECK9-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK9-NEXT: store i64 [[TMP11]], i64* [[TMP40]], align 8 -// CHECK9-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 -// CHECK9-NEXT: store i8* null, i8** [[TMP41]], align 8 -// CHECK9-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP43:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP44:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP45:%.*]] = load i32, i32* [[N]], align 4 -// CHECK9-NEXT: store i32 [[TMP45]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK9-NEXT: [[TMP46:%.*]] = load i32, i32* [[M]], align 4 -// CHECK9-NEXT: store i32 [[TMP46]], i32* [[DOTCAPTURE_EXPR_3]], 
align 4 -// CHECK9-NEXT: [[TMP47:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP47]], 0 +// CHECK9-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK9-NEXT: store i8* null, i8** [[TMP26]], align 8 +// CHECK9-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK9-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i64* +// CHECK9-NEXT: store i64 [[TMP3]], i64* [[TMP28]], align 8 +// CHECK9-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK9-NEXT: [[TMP30:%.*]] = bitcast i8** [[TMP29]] to i64* +// CHECK9-NEXT: store i64 [[TMP3]], i64* [[TMP30]], align 8 +// CHECK9-NEXT: [[TMP31:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 +// CHECK9-NEXT: store i8* null, i8** [[TMP31]], align 8 +// CHECK9-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK9-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i32** +// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP33]], align 8 +// CHECK9-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK9-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i32** +// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP35]], align 8 +// CHECK9-NEXT: store i64 [[TMP11]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes, i32 0, i32 4), align 8 +// CHECK9-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 +// CHECK9-NEXT: store i8* null, i8** [[TMP36]], align 8 +// CHECK9-NEXT: [[TMP37:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP39:%.*]] = load i32, i32* [[N]], align 4 +// CHECK9-NEXT: store i32 [[TMP39]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK9-NEXT: [[TMP40:%.*]] = load i32, i32* [[M]], align 4 +// CHECK9-NEXT: store i32 [[TMP40]], i32* [[DOTCAPTURE_EXPR_3]], align 4 +// CHECK9-NEXT: [[TMP41:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP41]], 0 // CHECK9-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK9-NEXT: [[CONV5:%.*]] = sext i32 [[DIV]] to i64 -// CHECK9-NEXT: [[TMP48:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4 -// CHECK9-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP48]], 0 +// CHECK9-NEXT: [[TMP42:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4 +// CHECK9-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP42]], 0 // CHECK9-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1 // CHECK9-NEXT: [[CONV8:%.*]] = sext i32 [[DIV7]] to i64 // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i64 [[CONV5]], [[CONV8]] // CHECK9-NEXT: [[SUB9:%.*]] = sub nsw i64 [[MUL]], 1 // CHECK9-NEXT: store i64 [[SUB9]], i64* [[DOTCAPTURE_EXPR_4]], align 8 -// CHECK9-NEXT: [[TMP49:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_4]], align 8 -// CHECK9-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP49]], 1 +// CHECK9-NEXT: [[TMP43:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_4]], align 8 +// CHECK9-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP43]], 1 // CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[ADD]]) -// CHECK9-NEXT: [[TMP50:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, 
i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l80.region_id, i32 5, i8** [[TMP42]], i8** [[TMP43]], i64* [[TMP44]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) -// CHECK9-NEXT: [[TMP51:%.*]] = icmp ne i32 [[TMP50]], 0 -// CHECK9-NEXT: br i1 [[TMP51]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK9-NEXT: [[TMP44:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l80.region_id, i32 5, i8** [[TMP37]], i8** [[TMP38]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK9-NEXT: [[TMP45:%.*]] = icmp ne i32 [[TMP44]], 0 +// CHECK9-NEXT: br i1 [[TMP45]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK9: omp_offload.failed: // CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l80(i64 [[TMP7]], i64 [[TMP9]], i64 [[TMP1]], i64 [[TMP3]], i32* [[VLA]]) #[[ATTR4:[0-9]+]] // CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -1106,10 +1095,10 @@ // CHECK9-NEXT: [[TMP52:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 // CHECK9-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10ELi2EEiT_(i32 noundef signext [[TMP52]]) // CHECK9-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK9-NEXT: [[TMP53:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK9-NEXT: call void @llvm.stackrestore(i8* [[TMP53]]) -// CHECK9-NEXT: [[TMP54:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK9-NEXT: ret i32 [[TMP54]] +// CHECK9-NEXT: [[TMP47:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK9-NEXT: call void @llvm.stackrestore(i8* [[TMP47]]) +// CHECK9-NEXT: [[TMP48:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK9-NEXT: ret i32 [[TMP48]] // // // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l80 @@ -1332,7 +1321,7 @@ // CHECK9-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK9-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 20) -// CHECK9-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l67.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.2, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK9-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l67.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.2, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.3, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) // CHECK9-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK9-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK9: omp_offload.failed: @@ -1464,7 +1453,6 @@ // CHECK10-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 8 // 
CHECK10-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 8 // CHECK10-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 8 -// CHECK10-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 8 // CHECK10-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK10-NEXT: [[_TMP2:%.*]] = alloca i32, align 4 // CHECK10-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 @@ -1501,74 +1489,64 @@ // CHECK10-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK10-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64* // CHECK10-NEXT: store i64 [[TMP7]], i64* [[TMP15]], align 8 -// CHECK10-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK10-NEXT: store i64 4, i64* [[TMP16]], align 8 -// CHECK10-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK10-NEXT: store i8* null, i8** [[TMP17]], align 8 -// CHECK10-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK10-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i64* -// CHECK10-NEXT: store i64 [[TMP9]], i64* [[TMP19]], align 8 -// CHECK10-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK10-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i64* -// CHECK10-NEXT: store i64 [[TMP9]], i64* [[TMP21]], align 8 -// CHECK10-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK10-NEXT: store i64 4, i64* [[TMP22]], align 8 -// CHECK10-NEXT: [[TMP23:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK10-NEXT: store i8* null, i8** [[TMP23]], align 8 -// CHECK10-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK10-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK10-NEXT: store i8* null, i8** [[TMP16]], align 8 +// CHECK10-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK10-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i64* +// CHECK10-NEXT: store i64 [[TMP9]], i64* [[TMP18]], align 8 +// CHECK10-NEXT: [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK10-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i64* +// CHECK10-NEXT: store i64 [[TMP9]], i64* [[TMP20]], align 8 +// CHECK10-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK10-NEXT: store i8* null, i8** [[TMP21]], align 8 +// CHECK10-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK10-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i64* +// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP23]], align 8 +// CHECK10-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK10-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i64* // CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP25]], align 8 -// CHECK10-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK10-NEXT: [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i64* -// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP27]], align 8 -// CHECK10-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i64], [5 x 
i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK10-NEXT: store i64 8, i64* [[TMP28]], align 8 -// CHECK10-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK10-NEXT: store i8* null, i8** [[TMP29]], align 8 -// CHECK10-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK10-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i64* -// CHECK10-NEXT: store i64 [[TMP3]], i64* [[TMP31]], align 8 -// CHECK10-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK10-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i64* -// CHECK10-NEXT: store i64 [[TMP3]], i64* [[TMP33]], align 8 -// CHECK10-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK10-NEXT: store i64 8, i64* [[TMP34]], align 8 -// CHECK10-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 -// CHECK10-NEXT: store i8* null, i8** [[TMP35]], align 8 -// CHECK10-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK10-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i32** -// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP37]], align 8 -// CHECK10-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK10-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i32** -// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP39]], align 8 -// CHECK10-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK10-NEXT: store i64 [[TMP11]], i64* [[TMP40]], align 8 -// CHECK10-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 -// CHECK10-NEXT: store i8* null, i8** [[TMP41]], align 8 -// CHECK10-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP43:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP44:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP45:%.*]] = load i32, i32* [[N]], align 4 -// CHECK10-NEXT: store i32 [[TMP45]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK10-NEXT: [[TMP46:%.*]] = load i32, i32* [[M]], align 4 -// CHECK10-NEXT: store i32 [[TMP46]], i32* [[DOTCAPTURE_EXPR_3]], align 4 -// CHECK10-NEXT: [[TMP47:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK10-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP47]], 0 +// CHECK10-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK10-NEXT: store i8* null, i8** [[TMP26]], align 8 +// CHECK10-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK10-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i64* +// CHECK10-NEXT: store i64 [[TMP3]], i64* [[TMP28]], align 8 +// CHECK10-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK10-NEXT: [[TMP30:%.*]] = bitcast i8** [[TMP29]] to i64* +// CHECK10-NEXT: store i64 [[TMP3]], i64* [[TMP30]], align 8 +// CHECK10-NEXT: [[TMP31:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 +// CHECK10-NEXT: store i8* null, i8** [[TMP31]], align 8 +// CHECK10-NEXT: 
[[TMP32:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK10-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i32** +// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP33]], align 8 +// CHECK10-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK10-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i32** +// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP35]], align 8 +// CHECK10-NEXT: store i64 [[TMP11]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes, i32 0, i32 4), align 8 +// CHECK10-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 +// CHECK10-NEXT: store i8* null, i8** [[TMP36]], align 8 +// CHECK10-NEXT: [[TMP37:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP39:%.*]] = load i32, i32* [[N]], align 4 +// CHECK10-NEXT: store i32 [[TMP39]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK10-NEXT: [[TMP40:%.*]] = load i32, i32* [[M]], align 4 +// CHECK10-NEXT: store i32 [[TMP40]], i32* [[DOTCAPTURE_EXPR_3]], align 4 +// CHECK10-NEXT: [[TMP41:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK10-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP41]], 0 // CHECK10-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK10-NEXT: [[CONV5:%.*]] = sext i32 [[DIV]] to i64 -// CHECK10-NEXT: [[TMP48:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4 -// CHECK10-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP48]], 0 +// CHECK10-NEXT: [[TMP42:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4 +// CHECK10-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP42]], 0 // CHECK10-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1 // CHECK10-NEXT: [[CONV8:%.*]] = sext i32 [[DIV7]] to i64 // CHECK10-NEXT: [[MUL:%.*]] = mul nsw i64 [[CONV5]], [[CONV8]] // CHECK10-NEXT: [[SUB9:%.*]] = sub nsw i64 [[MUL]], 1 // CHECK10-NEXT: store i64 [[SUB9]], i64* [[DOTCAPTURE_EXPR_4]], align 8 -// CHECK10-NEXT: [[TMP49:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_4]], align 8 -// CHECK10-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP49]], 1 +// CHECK10-NEXT: [[TMP43:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_4]], align 8 +// CHECK10-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP43]], 1 // CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[ADD]]) -// CHECK10-NEXT: [[TMP50:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l80.region_id, i32 5, i8** [[TMP42]], i8** [[TMP43]], i64* [[TMP44]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) -// CHECK10-NEXT: [[TMP51:%.*]] = icmp ne i32 [[TMP50]], 0 -// CHECK10-NEXT: br i1 [[TMP51]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK10-NEXT: [[TMP44:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l80.region_id, i32 5, i8** [[TMP37]], i8** [[TMP38]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK10-NEXT: [[TMP45:%.*]] = icmp ne i32 [[TMP44]], 0 +// CHECK10-NEXT: br i1 [[TMP45]], label [[OMP_OFFLOAD_FAILED:%.*]], 
label [[OMP_OFFLOAD_CONT:%.*]] // CHECK10: omp_offload.failed: // CHECK10-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l80(i64 [[TMP7]], i64 [[TMP9]], i64 [[TMP1]], i64 [[TMP3]], i32* [[VLA]]) #[[ATTR4:[0-9]+]] // CHECK10-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -1576,10 +1554,10 @@ // CHECK10-NEXT: [[TMP52:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 // CHECK10-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10ELi2EEiT_(i32 noundef signext [[TMP52]]) // CHECK10-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK10-NEXT: [[TMP53:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK10-NEXT: call void @llvm.stackrestore(i8* [[TMP53]]) -// CHECK10-NEXT: [[TMP54:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK10-NEXT: ret i32 [[TMP54]] +// CHECK10-NEXT: [[TMP47:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK10-NEXT: call void @llvm.stackrestore(i8* [[TMP47]]) +// CHECK10-NEXT: [[TMP48:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK10-NEXT: ret i32 [[TMP48]] // // // CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l80 @@ -1802,7 +1780,7 @@ // CHECK10-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK10-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 20) -// CHECK10-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l67.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.2, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK10-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l67.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.2, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.3, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) // CHECK10-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK10-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK10: omp_offload.failed: @@ -1934,7 +1912,6 @@ // CHECK11-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 4 // CHECK11-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 4 // CHECK11-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 4 -// CHECK11-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 4 // CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK11-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 @@ -1968,74 +1945,64 @@ // CHECK11-NEXT: [[TMP13:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK11-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i32* // CHECK11-NEXT: store i32 [[TMP5]], i32* [[TMP14]], align 4 -// CHECK11-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK11-NEXT: store i64 4, i64* [[TMP15]], align 4 -// CHECK11-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 
0 -// CHECK11-NEXT: store i8* null, i8** [[TMP16]], align 4 -// CHECK11-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK11-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32* -// CHECK11-NEXT: store i32 [[TMP7]], i32* [[TMP18]], align 4 -// CHECK11-NEXT: [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK11-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i32* -// CHECK11-NEXT: store i32 [[TMP7]], i32* [[TMP20]], align 4 -// CHECK11-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK11-NEXT: store i64 4, i64* [[TMP21]], align 4 -// CHECK11-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK11-NEXT: store i8* null, i8** [[TMP22]], align 4 -// CHECK11-NEXT: [[TMP23:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK11-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK11-NEXT: store i8* null, i8** [[TMP15]], align 4 +// CHECK11-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK11-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32* +// CHECK11-NEXT: store i32 [[TMP7]], i32* [[TMP17]], align 4 +// CHECK11-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK11-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32* +// CHECK11-NEXT: store i32 [[TMP7]], i32* [[TMP19]], align 4 +// CHECK11-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK11-NEXT: store i8* null, i8** [[TMP20]], align 4 +// CHECK11-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK11-NEXT: [[TMP22:%.*]] = bitcast i8** [[TMP21]] to i32* +// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP22]], align 4 +// CHECK11-NEXT: [[TMP23:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK11-NEXT: [[TMP24:%.*]] = bitcast i8** [[TMP23]] to i32* // CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP24]], align 4 -// CHECK11-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK11-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i32* -// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP26]], align 4 -// CHECK11-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK11-NEXT: store i64 4, i64* [[TMP27]], align 4 -// CHECK11-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK11-NEXT: store i8* null, i8** [[TMP28]], align 4 -// CHECK11-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK11-NEXT: [[TMP30:%.*]] = bitcast i8** [[TMP29]] to i32* -// CHECK11-NEXT: store i32 [[TMP1]], i32* [[TMP30]], align 4 -// CHECK11-NEXT: [[TMP31:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK11-NEXT: [[TMP32:%.*]] = bitcast i8** [[TMP31]] to i32* -// CHECK11-NEXT: store i32 [[TMP1]], i32* [[TMP32]], align 4 -// CHECK11-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK11-NEXT: store i64 4, i64* [[TMP33]], align 4 
-// CHECK11-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 -// CHECK11-NEXT: store i8* null, i8** [[TMP34]], align 4 -// CHECK11-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK11-NEXT: [[TMP36:%.*]] = bitcast i8** [[TMP35]] to i32** -// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP36]], align 4 -// CHECK11-NEXT: [[TMP37:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK11-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i32** -// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP38]], align 4 -// CHECK11-NEXT: [[TMP39:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK11-NEXT: store i64 [[TMP10]], i64* [[TMP39]], align 4 -// CHECK11-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 -// CHECK11-NEXT: store i8* null, i8** [[TMP40]], align 4 -// CHECK11-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP43:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP44:%.*]] = load i32, i32* [[N]], align 4 -// CHECK11-NEXT: store i32 [[TMP44]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK11-NEXT: [[TMP45:%.*]] = load i32, i32* [[M]], align 4 -// CHECK11-NEXT: store i32 [[TMP45]], i32* [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK11-NEXT: [[TMP46:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP46]], 0 +// CHECK11-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK11-NEXT: store i8* null, i8** [[TMP25]], align 4 +// CHECK11-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK11-NEXT: [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i32* +// CHECK11-NEXT: store i32 [[TMP1]], i32* [[TMP27]], align 4 +// CHECK11-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK11-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i32* +// CHECK11-NEXT: store i32 [[TMP1]], i32* [[TMP29]], align 4 +// CHECK11-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 +// CHECK11-NEXT: store i8* null, i8** [[TMP30]], align 4 +// CHECK11-NEXT: [[TMP31:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK11-NEXT: [[TMP32:%.*]] = bitcast i8** [[TMP31]] to i32** +// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP32]], align 4 +// CHECK11-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK11-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i32** +// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP34]], align 4 +// CHECK11-NEXT: store i64 [[TMP10]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes, i32 0, i32 4), align 4 +// CHECK11-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 +// CHECK11-NEXT: store i8* null, i8** [[TMP35]], align 4 +// CHECK11-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP37:%.*]] = 
getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP38:%.*]] = load i32, i32* [[N]], align 4 +// CHECK11-NEXT: store i32 [[TMP38]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK11-NEXT: [[TMP39:%.*]] = load i32, i32* [[M]], align 4 +// CHECK11-NEXT: store i32 [[TMP39]], i32* [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK11-NEXT: [[TMP40:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP40]], 0 // CHECK11-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK11-NEXT: [[CONV:%.*]] = sext i32 [[DIV]] to i64 -// CHECK11-NEXT: [[TMP47:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK11-NEXT: [[SUB4:%.*]] = sub nsw i32 [[TMP47]], 0 +// CHECK11-NEXT: [[TMP41:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK11-NEXT: [[SUB4:%.*]] = sub nsw i32 [[TMP41]], 0 // CHECK11-NEXT: [[DIV5:%.*]] = sdiv i32 [[SUB4]], 1 // CHECK11-NEXT: [[CONV6:%.*]] = sext i32 [[DIV5]] to i64 // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i64 [[CONV]], [[CONV6]] // CHECK11-NEXT: [[SUB7:%.*]] = sub nsw i64 [[MUL]], 1 // CHECK11-NEXT: store i64 [[SUB7]], i64* [[DOTCAPTURE_EXPR_3]], align 8 -// CHECK11-NEXT: [[TMP48:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8 -// CHECK11-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP48]], 1 +// CHECK11-NEXT: [[TMP42:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8 +// CHECK11-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP42]], 1 // CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[ADD]]) -// CHECK11-NEXT: [[TMP49:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l80.region_id, i32 5, i8** [[TMP41]], i8** [[TMP42]], i64* [[TMP43]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) -// CHECK11-NEXT: [[TMP50:%.*]] = icmp ne i32 [[TMP49]], 0 -// CHECK11-NEXT: br i1 [[TMP50]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK11-NEXT: [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l80.region_id, i32 5, i8** [[TMP36]], i8** [[TMP37]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK11-NEXT: [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0 +// CHECK11-NEXT: br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK11: omp_offload.failed: // CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l80(i32 [[TMP5]], i32 [[TMP7]], i32 [[TMP0]], i32 [[TMP1]], i32* [[VLA]]) #[[ATTR4:[0-9]+]] // CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -2043,10 +2010,10 @@ // CHECK11-NEXT: [[TMP51:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 // CHECK11-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10ELi2EEiT_(i32 noundef [[TMP51]]) // CHECK11-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK11-NEXT: [[TMP52:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK11-NEXT: call void @llvm.stackrestore(i8* [[TMP52]]) -// CHECK11-NEXT: [[TMP53:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK11-NEXT: ret i32 [[TMP53]] +// CHECK11-NEXT: [[TMP46:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK11-NEXT: call void @llvm.stackrestore(i8* [[TMP46]]) +// CHECK11-NEXT: 
[[TMP47:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK11-NEXT: ret i32 [[TMP47]] // // // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l80 @@ -2261,7 +2228,7 @@ // CHECK11-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK11-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 20) -// CHECK11-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l67.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.2, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK11-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l67.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.2, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.3, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) // CHECK11-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK11-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK11: omp_offload.failed: @@ -2391,7 +2358,6 @@ // CHECK12-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 4 // CHECK12-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 4 // CHECK12-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 4 -// CHECK12-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 4 // CHECK12-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK12-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 // CHECK12-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 @@ -2425,74 +2391,64 @@ // CHECK12-NEXT: [[TMP13:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK12-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i32* // CHECK12-NEXT: store i32 [[TMP5]], i32* [[TMP14]], align 4 -// CHECK12-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK12-NEXT: store i64 4, i64* [[TMP15]], align 4 -// CHECK12-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK12-NEXT: store i8* null, i8** [[TMP16]], align 4 -// CHECK12-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK12-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32* -// CHECK12-NEXT: store i32 [[TMP7]], i32* [[TMP18]], align 4 -// CHECK12-NEXT: [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK12-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i32* -// CHECK12-NEXT: store i32 [[TMP7]], i32* [[TMP20]], align 4 -// CHECK12-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK12-NEXT: store i64 4, i64* [[TMP21]], align 4 -// CHECK12-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK12-NEXT: store i8* null, i8** [[TMP22]], align 4 -// CHECK12-NEXT: [[TMP23:%.*]] = getelementptr 
inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK12-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK12-NEXT: store i8* null, i8** [[TMP15]], align 4 +// CHECK12-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK12-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32* +// CHECK12-NEXT: store i32 [[TMP7]], i32* [[TMP17]], align 4 +// CHECK12-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK12-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32* +// CHECK12-NEXT: store i32 [[TMP7]], i32* [[TMP19]], align 4 +// CHECK12-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK12-NEXT: store i8* null, i8** [[TMP20]], align 4 +// CHECK12-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK12-NEXT: [[TMP22:%.*]] = bitcast i8** [[TMP21]] to i32* +// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP22]], align 4 +// CHECK12-NEXT: [[TMP23:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK12-NEXT: [[TMP24:%.*]] = bitcast i8** [[TMP23]] to i32* // CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP24]], align 4 -// CHECK12-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK12-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i32* -// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP26]], align 4 -// CHECK12-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK12-NEXT: store i64 4, i64* [[TMP27]], align 4 -// CHECK12-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK12-NEXT: store i8* null, i8** [[TMP28]], align 4 -// CHECK12-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK12-NEXT: [[TMP30:%.*]] = bitcast i8** [[TMP29]] to i32* -// CHECK12-NEXT: store i32 [[TMP1]], i32* [[TMP30]], align 4 -// CHECK12-NEXT: [[TMP31:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK12-NEXT: [[TMP32:%.*]] = bitcast i8** [[TMP31]] to i32* -// CHECK12-NEXT: store i32 [[TMP1]], i32* [[TMP32]], align 4 -// CHECK12-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK12-NEXT: store i64 4, i64* [[TMP33]], align 4 -// CHECK12-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 -// CHECK12-NEXT: store i8* null, i8** [[TMP34]], align 4 -// CHECK12-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK12-NEXT: [[TMP36:%.*]] = bitcast i8** [[TMP35]] to i32** -// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP36]], align 4 -// CHECK12-NEXT: [[TMP37:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK12-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i32** -// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP38]], align 4 -// CHECK12-NEXT: [[TMP39:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK12-NEXT: store i64 [[TMP10]], i64* [[TMP39]], align 4 -// CHECK12-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* 
[[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 -// CHECK12-NEXT: store i8* null, i8** [[TMP40]], align 4 -// CHECK12-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP43:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP44:%.*]] = load i32, i32* [[N]], align 4 -// CHECK12-NEXT: store i32 [[TMP44]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK12-NEXT: [[TMP45:%.*]] = load i32, i32* [[M]], align 4 -// CHECK12-NEXT: store i32 [[TMP45]], i32* [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK12-NEXT: [[TMP46:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK12-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP46]], 0 +// CHECK12-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK12-NEXT: store i8* null, i8** [[TMP25]], align 4 +// CHECK12-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK12-NEXT: [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i32* +// CHECK12-NEXT: store i32 [[TMP1]], i32* [[TMP27]], align 4 +// CHECK12-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK12-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i32* +// CHECK12-NEXT: store i32 [[TMP1]], i32* [[TMP29]], align 4 +// CHECK12-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 +// CHECK12-NEXT: store i8* null, i8** [[TMP30]], align 4 +// CHECK12-NEXT: [[TMP31:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK12-NEXT: [[TMP32:%.*]] = bitcast i8** [[TMP31]] to i32** +// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP32]], align 4 +// CHECK12-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK12-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i32** +// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP34]], align 4 +// CHECK12-NEXT: store i64 [[TMP10]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes, i32 0, i32 4), align 4 +// CHECK12-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 +// CHECK12-NEXT: store i8* null, i8** [[TMP35]], align 4 +// CHECK12-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP37:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP38:%.*]] = load i32, i32* [[N]], align 4 +// CHECK12-NEXT: store i32 [[TMP38]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK12-NEXT: [[TMP39:%.*]] = load i32, i32* [[M]], align 4 +// CHECK12-NEXT: store i32 [[TMP39]], i32* [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK12-NEXT: [[TMP40:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK12-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP40]], 0 // CHECK12-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK12-NEXT: [[CONV:%.*]] = sext i32 [[DIV]] to i64 -// CHECK12-NEXT: [[TMP47:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK12-NEXT: [[SUB4:%.*]] = sub nsw i32 [[TMP47]], 0 +// CHECK12-NEXT: [[TMP41:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK12-NEXT: [[SUB4:%.*]] = sub nsw i32 [[TMP41]], 0 // CHECK12-NEXT: [[DIV5:%.*]] = sdiv i32 [[SUB4]], 1 
// CHECK12-NEXT: [[CONV6:%.*]] = sext i32 [[DIV5]] to i64 // CHECK12-NEXT: [[MUL:%.*]] = mul nsw i64 [[CONV]], [[CONV6]] // CHECK12-NEXT: [[SUB7:%.*]] = sub nsw i64 [[MUL]], 1 // CHECK12-NEXT: store i64 [[SUB7]], i64* [[DOTCAPTURE_EXPR_3]], align 8 -// CHECK12-NEXT: [[TMP48:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8 -// CHECK12-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP48]], 1 +// CHECK12-NEXT: [[TMP42:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8 +// CHECK12-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP42]], 1 // CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[ADD]]) -// CHECK12-NEXT: [[TMP49:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l80.region_id, i32 5, i8** [[TMP41]], i8** [[TMP42]], i64* [[TMP43]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) -// CHECK12-NEXT: [[TMP50:%.*]] = icmp ne i32 [[TMP49]], 0 -// CHECK12-NEXT: br i1 [[TMP50]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK12-NEXT: [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l80.region_id, i32 5, i8** [[TMP36]], i8** [[TMP37]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK12-NEXT: [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0 +// CHECK12-NEXT: br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK12: omp_offload.failed: // CHECK12-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l80(i32 [[TMP5]], i32 [[TMP7]], i32 [[TMP0]], i32 [[TMP1]], i32* [[VLA]]) #[[ATTR4:[0-9]+]] // CHECK12-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -2500,10 +2456,10 @@ // CHECK12-NEXT: [[TMP51:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 // CHECK12-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10ELi2EEiT_(i32 noundef [[TMP51]]) // CHECK12-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK12-NEXT: [[TMP52:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK12-NEXT: call void @llvm.stackrestore(i8* [[TMP52]]) -// CHECK12-NEXT: [[TMP53:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK12-NEXT: ret i32 [[TMP53]] +// CHECK12-NEXT: [[TMP46:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK12-NEXT: call void @llvm.stackrestore(i8* [[TMP46]]) +// CHECK12-NEXT: [[TMP47:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK12-NEXT: ret i32 [[TMP47]] // // // CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l80 @@ -2718,7 +2674,7 @@ // CHECK12-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK12-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 20) -// CHECK12-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l67.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.2, i32 0, 
i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK12-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l67.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.2, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.3, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) // CHECK12-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK12-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK12: omp_offload.failed: diff --git a/clang/test/OpenMP/target_teams_distribute_simd_dist_schedule_codegen.cpp b/clang/test/OpenMP/target_teams_distribute_simd_dist_schedule_codegen.cpp --- a/clang/test/OpenMP/target_teams_distribute_simd_dist_schedule_codegen.cpp +++ b/clang/test/OpenMP/target_teams_distribute_simd_dist_schedule_codegen.cpp @@ -2095,7 +2095,6 @@ // CHECK9-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK9-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK9-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8 -// CHECK9-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 8 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -2103,20 +2102,18 @@ // CHECK9-NEXT: [[DOTOFFLOAD_BASEPTRS5:%.*]] = alloca [3 x i8*], align 8 // CHECK9-NEXT: [[DOTOFFLOAD_PTRS6:%.*]] = alloca [3 x i8*], align 8 // CHECK9-NEXT: [[DOTOFFLOAD_MAPPERS7:%.*]] = alloca [3 x i8*], align 8 -// CHECK9-NEXT: [[DOTOFFLOAD_SIZES8:%.*]] = alloca [3 x i64], align 8 -// CHECK9-NEXT: [[_TMP9:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[_TMP8:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTCAPTURE_EXPR_11:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTCAPTURE_EXPR_18:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[N_CASTED19:%.*]] = alloca i64, align 8 +// CHECK9-NEXT: [[DOTCAPTURE_EXPR_17:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[N_CASTED18:%.*]] = alloca i64, align 8 // CHECK9-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8 -// CHECK9-NEXT: [[DOTOFFLOAD_BASEPTRS22:%.*]] = alloca [4 x i8*], align 8 -// CHECK9-NEXT: [[DOTOFFLOAD_PTRS23:%.*]] = alloca [4 x i8*], align 8 -// CHECK9-NEXT: [[DOTOFFLOAD_MAPPERS24:%.*]] = alloca [4 x i8*], align 8 -// CHECK9-NEXT: [[DOTOFFLOAD_SIZES25:%.*]] = alloca [4 x i64], align 8 -// CHECK9-NEXT: [[_TMP26:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTCAPTURE_EXPR_27:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTCAPTURE_EXPR_28:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTOFFLOAD_BASEPTRS21:%.*]] = alloca [4 x i8*], align 8 +// CHECK9-NEXT: [[DOTOFFLOAD_PTRS22:%.*]] = alloca [4 x i8*], align 8 +// CHECK9-NEXT: [[DOTOFFLOAD_MAPPERS23:%.*]] = alloca [4 x i8*], align 8 +// CHECK9-NEXT: [[_TMP24:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTCAPTURE_EXPR_25:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTCAPTURE_EXPR_26:%.*]] = alloca i32, align 4 // CHECK9-NEXT: store i32 0, i32* [[RETVAL]], align 4 // CHECK9-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 // CHECK9-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 8 @@ -2138,186 +2135,166 @@ // CHECK9-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], 
[3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK9-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i64* // CHECK9-NEXT: store i64 [[TMP4]], i64* [[TMP9]], align 8 -// CHECK9-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK9-NEXT: store i64 4, i64* [[TMP10]], align 8 -// CHECK9-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK9-NEXT: store i8* null, i8** [[TMP11]], align 8 -// CHECK9-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK9-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64* -// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP13]], align 8 -// CHECK9-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK9-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64* -// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP15]], align 8 -// CHECK9-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK9-NEXT: store i64 8, i64* [[TMP16]], align 8 -// CHECK9-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK9-NEXT: store i8* null, i8** [[TMP17]], align 8 -// CHECK9-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK9-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK9-NEXT: store i8* null, i8** [[TMP10]], align 8 +// CHECK9-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK9-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i64* +// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP12]], align 8 +// CHECK9-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK9-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i64* +// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP14]], align 8 +// CHECK9-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK9-NEXT: store i8* null, i8** [[TMP15]], align 8 +// CHECK9-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK9-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** +// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 8 +// CHECK9-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK9-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** // CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 8 -// CHECK9-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK9-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** -// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 8 -// CHECK9-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK9-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 8 -// CHECK9-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK9-NEXT: store i8* null, i8** [[TMP23]], align 8 -// CHECK9-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 
-// CHECK9-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4 -// CHECK9-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK9-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 +// CHECK9-NEXT: store i64 [[TMP5]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 2), align 8 +// CHECK9-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK9-NEXT: store i8* null, i8** [[TMP20]], align 8 +// CHECK9-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP23:%.*]] = load i32, i32* [[N]], align 4 +// CHECK9-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK9-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK9-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK9-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK9-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK9-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1 -// CHECK9-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 -// CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[TMP30]]) -// CHECK9-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l94.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) -// CHECK9-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 -// CHECK9-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK9-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], 1 +// CHECK9-NEXT: [[TMP26:%.*]] = zext i32 [[ADD]] to i64 +// CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[TMP26]]) +// CHECK9-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l94.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK9-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK9-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK9: omp_offload.failed: // CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l94(i64 [[TMP4]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR4:[0-9]+]] // CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK9: omp_offload.cont: -// CHECK9-NEXT: [[TMP33:%.*]] = load i32, i32* [[N]], align 4 +// CHECK9-NEXT: [[TMP29:%.*]] = load i32, i32* [[N]], align 4 // CHECK9-NEXT: [[CONV4:%.*]] = bitcast i64* [[N_CASTED3]] to i32* -// CHECK9-NEXT: store i32 [[TMP33]], i32* 
[[CONV4]], align 4 -// CHECK9-NEXT: [[TMP34:%.*]] = load i64, i64* [[N_CASTED3]], align 8 -// CHECK9-NEXT: [[TMP35:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK9-NEXT: [[TMP36:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i64* -// CHECK9-NEXT: store i64 [[TMP34]], i64* [[TMP37]], align 8 -// CHECK9-NEXT: [[TMP38:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i64* -// CHECK9-NEXT: store i64 [[TMP34]], i64* [[TMP39]], align 8 -// CHECK9-NEXT: [[TMP40:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 0 -// CHECK9-NEXT: store i64 4, i64* [[TMP40]], align 8 -// CHECK9-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 0 +// CHECK9-NEXT: store i32 [[TMP29]], i32* [[CONV4]], align 4 +// CHECK9-NEXT: [[TMP30:%.*]] = load i64, i64* [[N_CASTED3]], align 8 +// CHECK9-NEXT: [[TMP31:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK9-NEXT: [[TMP32:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i64* +// CHECK9-NEXT: store i64 [[TMP30]], i64* [[TMP33]], align 8 +// CHECK9-NEXT: [[TMP34:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i64* +// CHECK9-NEXT: store i64 [[TMP30]], i64* [[TMP35]], align 8 +// CHECK9-NEXT: [[TMP36:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 0 +// CHECK9-NEXT: store i8* null, i8** [[TMP36]], align 8 +// CHECK9-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1 +// CHECK9-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i64* +// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP38]], align 8 +// CHECK9-NEXT: [[TMP39:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 1 +// CHECK9-NEXT: [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i64* +// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP40]], align 8 +// CHECK9-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 1 // CHECK9-NEXT: store i8* null, i8** [[TMP41]], align 8 -// CHECK9-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1 -// CHECK9-NEXT: [[TMP43:%.*]] = bitcast i8** [[TMP42]] to i64* -// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP43]], align 8 -// CHECK9-NEXT: [[TMP44:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 1 -// CHECK9-NEXT: [[TMP45:%.*]] = bitcast i8** [[TMP44]] to i64* -// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP45]], align 8 -// CHECK9-NEXT: [[TMP46:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 1 -// CHECK9-NEXT: store i64 8, i64* [[TMP46]], align 8 -// CHECK9-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 1 -// CHECK9-NEXT: store i8* null, i8** [[TMP47]], align 8 -// CHECK9-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 2 -// CHECK9-NEXT: [[TMP49:%.*]] = bitcast i8** [[TMP48]] to i32** -// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP49]], align 8 -// CHECK9-NEXT: [[TMP50:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* 
[[DOTOFFLOAD_PTRS6]], i32 0, i32 2 -// CHECK9-NEXT: [[TMP51:%.*]] = bitcast i8** [[TMP50]] to i32** -// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP51]], align 8 -// CHECK9-NEXT: [[TMP52:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 2 -// CHECK9-NEXT: store i64 [[TMP35]], i64* [[TMP52]], align 8 -// CHECK9-NEXT: [[TMP53:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 2 -// CHECK9-NEXT: store i8* null, i8** [[TMP53]], align 8 -// CHECK9-NEXT: [[TMP54:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP55:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP56:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP57:%.*]] = load i32, i32* [[N]], align 4 -// CHECK9-NEXT: store i32 [[TMP57]], i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK9-NEXT: [[TMP58:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK9-NEXT: [[SUB12:%.*]] = sub nsw i32 [[TMP58]], 0 -// CHECK9-NEXT: [[DIV13:%.*]] = sdiv i32 [[SUB12]], 1 -// CHECK9-NEXT: [[SUB14:%.*]] = sub nsw i32 [[DIV13]], 1 -// CHECK9-NEXT: store i32 [[SUB14]], i32* [[DOTCAPTURE_EXPR_11]], align 4 -// CHECK9-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_11]], align 4 -// CHECK9-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP59]], 1 -// CHECK9-NEXT: [[TMP60:%.*]] = zext i32 [[ADD15]] to i64 -// CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP60]]) -// CHECK9-NEXT: [[TMP61:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l98.region_id, i32 3, i8** [[TMP54]], i8** [[TMP55]], i64* [[TMP56]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.2, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) -// CHECK9-NEXT: [[TMP62:%.*]] = icmp ne i32 [[TMP61]], 0 -// CHECK9-NEXT: br i1 [[TMP62]], label [[OMP_OFFLOAD_FAILED16:%.*]], label [[OMP_OFFLOAD_CONT17:%.*]] -// CHECK9: omp_offload.failed16: -// CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l98(i64 [[TMP34]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR4]] -// CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT17]] -// CHECK9: omp_offload.cont17: -// CHECK9-NEXT: [[TMP63:%.*]] = load i32, i32* [[N]], align 4 -// CHECK9-NEXT: store i32 [[TMP63]], i32* [[DOTCAPTURE_EXPR_18]], align 4 -// CHECK9-NEXT: [[TMP64:%.*]] = load i32, i32* [[N]], align 4 -// CHECK9-NEXT: [[CONV20:%.*]] = bitcast i64* [[N_CASTED19]] to i32* -// CHECK9-NEXT: store i32 [[TMP64]], i32* [[CONV20]], align 4 -// CHECK9-NEXT: [[TMP65:%.*]] = load i64, i64* [[N_CASTED19]], align 8 -// CHECK9-NEXT: [[TMP66:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_18]], align 4 -// CHECK9-NEXT: [[CONV21:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* -// CHECK9-NEXT: store i32 [[TMP66]], i32* [[CONV21]], align 4 -// CHECK9-NEXT: [[TMP67:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK9-NEXT: [[TMP68:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK9-NEXT: [[TMP69:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP70:%.*]] = bitcast i8** [[TMP69]] to i64* -// CHECK9-NEXT: store i64 [[TMP65]], i64* [[TMP70]], align 8 -// CHECK9-NEXT: [[TMP71:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP72:%.*]] 
= bitcast i8** [[TMP71]] to i64* -// CHECK9-NEXT: store i64 [[TMP65]], i64* [[TMP72]], align 8 -// CHECK9-NEXT: [[TMP73:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 0 -// CHECK9-NEXT: store i64 4, i64* [[TMP73]], align 8 -// CHECK9-NEXT: [[TMP74:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 0 -// CHECK9-NEXT: store i8* null, i8** [[TMP74]], align 8 -// CHECK9-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 1 -// CHECK9-NEXT: [[TMP76:%.*]] = bitcast i8** [[TMP75]] to i64* -// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP76]], align 8 -// CHECK9-NEXT: [[TMP77:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 1 -// CHECK9-NEXT: [[TMP78:%.*]] = bitcast i8** [[TMP77]] to i64* -// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP78]], align 8 -// CHECK9-NEXT: [[TMP79:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 1 -// CHECK9-NEXT: store i64 8, i64* [[TMP79]], align 8 -// CHECK9-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 1 +// CHECK9-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 2 +// CHECK9-NEXT: [[TMP43:%.*]] = bitcast i8** [[TMP42]] to i32** +// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP43]], align 8 +// CHECK9-NEXT: [[TMP44:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 2 +// CHECK9-NEXT: [[TMP45:%.*]] = bitcast i8** [[TMP44]] to i32** +// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP45]], align 8 +// CHECK9-NEXT: store i64 [[TMP31]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.2, i32 0, i32 2), align 8 +// CHECK9-NEXT: [[TMP46:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 2 +// CHECK9-NEXT: store i8* null, i8** [[TMP46]], align 8 +// CHECK9-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP49:%.*]] = load i32, i32* [[N]], align 4 +// CHECK9-NEXT: store i32 [[TMP49]], i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK9-NEXT: [[TMP50:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK9-NEXT: [[SUB11:%.*]] = sub nsw i32 [[TMP50]], 0 +// CHECK9-NEXT: [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1 +// CHECK9-NEXT: [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1 +// CHECK9-NEXT: store i32 [[SUB13]], i32* [[DOTCAPTURE_EXPR_10]], align 4 +// CHECK9-NEXT: [[TMP51:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 +// CHECK9-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP51]], 1 +// CHECK9-NEXT: [[TMP52:%.*]] = zext i32 [[ADD14]] to i64 +// CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP52]]) +// CHECK9-NEXT: [[TMP53:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l98.region_id, i32 3, i8** [[TMP47]], i8** [[TMP48]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.2, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.3, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK9-NEXT: [[TMP54:%.*]] = icmp ne i32 [[TMP53]], 0 +// CHECK9-NEXT: br i1 [[TMP54]], label [[OMP_OFFLOAD_FAILED15:%.*]], label 
[[OMP_OFFLOAD_CONT16:%.*]] +// CHECK9: omp_offload.failed15: +// CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l98(i64 [[TMP30]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR4]] +// CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT16]] +// CHECK9: omp_offload.cont16: +// CHECK9-NEXT: [[TMP55:%.*]] = load i32, i32* [[N]], align 4 +// CHECK9-NEXT: store i32 [[TMP55]], i32* [[DOTCAPTURE_EXPR_17]], align 4 +// CHECK9-NEXT: [[TMP56:%.*]] = load i32, i32* [[N]], align 4 +// CHECK9-NEXT: [[CONV19:%.*]] = bitcast i64* [[N_CASTED18]] to i32* +// CHECK9-NEXT: store i32 [[TMP56]], i32* [[CONV19]], align 4 +// CHECK9-NEXT: [[TMP57:%.*]] = load i64, i64* [[N_CASTED18]], align 8 +// CHECK9-NEXT: [[TMP58:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_17]], align 4 +// CHECK9-NEXT: [[CONV20:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* +// CHECK9-NEXT: store i32 [[TMP58]], i32* [[CONV20]], align 4 +// CHECK9-NEXT: [[TMP59:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 +// CHECK9-NEXT: [[TMP60:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK9-NEXT: [[TMP61:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP62:%.*]] = bitcast i8** [[TMP61]] to i64* +// CHECK9-NEXT: store i64 [[TMP57]], i64* [[TMP62]], align 8 +// CHECK9-NEXT: [[TMP63:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP64:%.*]] = bitcast i8** [[TMP63]] to i64* +// CHECK9-NEXT: store i64 [[TMP57]], i64* [[TMP64]], align 8 +// CHECK9-NEXT: [[TMP65:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 0 +// CHECK9-NEXT: store i8* null, i8** [[TMP65]], align 8 +// CHECK9-NEXT: [[TMP66:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 1 +// CHECK9-NEXT: [[TMP67:%.*]] = bitcast i8** [[TMP66]] to i64* +// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP67]], align 8 +// CHECK9-NEXT: [[TMP68:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 1 +// CHECK9-NEXT: [[TMP69:%.*]] = bitcast i8** [[TMP68]] to i64* +// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP69]], align 8 +// CHECK9-NEXT: [[TMP70:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 1 +// CHECK9-NEXT: store i8* null, i8** [[TMP70]], align 8 +// CHECK9-NEXT: [[TMP71:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 2 +// CHECK9-NEXT: [[TMP72:%.*]] = bitcast i8** [[TMP71]] to i32** +// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP72]], align 8 +// CHECK9-NEXT: [[TMP73:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 2 +// CHECK9-NEXT: [[TMP74:%.*]] = bitcast i8** [[TMP73]] to i32** +// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP74]], align 8 +// CHECK9-NEXT: store i64 [[TMP60]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.5, i32 0, i32 2), align 8 +// CHECK9-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 2 +// CHECK9-NEXT: store i8* null, i8** [[TMP75]], align 8 +// CHECK9-NEXT: [[TMP76:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 3 +// CHECK9-NEXT: [[TMP77:%.*]] = bitcast i8** [[TMP76]] to i64* +// CHECK9-NEXT: store i64 [[TMP59]], i64* [[TMP77]], align 8 +// CHECK9-NEXT: [[TMP78:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 3 +// CHECK9-NEXT: [[TMP79:%.*]] = bitcast 
i8** [[TMP78]] to i64* +// CHECK9-NEXT: store i64 [[TMP59]], i64* [[TMP79]], align 8 +// CHECK9-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 3 // CHECK9-NEXT: store i8* null, i8** [[TMP80]], align 8 -// CHECK9-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 2 -// CHECK9-NEXT: [[TMP82:%.*]] = bitcast i8** [[TMP81]] to i32** -// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP82]], align 8 -// CHECK9-NEXT: [[TMP83:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 2 -// CHECK9-NEXT: [[TMP84:%.*]] = bitcast i8** [[TMP83]] to i32** -// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP84]], align 8 -// CHECK9-NEXT: [[TMP85:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 2 -// CHECK9-NEXT: store i64 [[TMP68]], i64* [[TMP85]], align 8 -// CHECK9-NEXT: [[TMP86:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 2 -// CHECK9-NEXT: store i8* null, i8** [[TMP86]], align 8 -// CHECK9-NEXT: [[TMP87:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 3 -// CHECK9-NEXT: [[TMP88:%.*]] = bitcast i8** [[TMP87]] to i64* -// CHECK9-NEXT: store i64 [[TMP67]], i64* [[TMP88]], align 8 -// CHECK9-NEXT: [[TMP89:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 3 -// CHECK9-NEXT: [[TMP90:%.*]] = bitcast i8** [[TMP89]] to i64* -// CHECK9-NEXT: store i64 [[TMP67]], i64* [[TMP90]], align 8 -// CHECK9-NEXT: [[TMP91:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 3 -// CHECK9-NEXT: store i64 4, i64* [[TMP91]], align 8 -// CHECK9-NEXT: [[TMP92:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 3 -// CHECK9-NEXT: store i8* null, i8** [[TMP92]], align 8 -// CHECK9-NEXT: [[TMP93:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP94:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP95:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP96:%.*]] = load i32, i32* [[N]], align 4 -// CHECK9-NEXT: store i32 [[TMP96]], i32* [[DOTCAPTURE_EXPR_27]], align 4 -// CHECK9-NEXT: [[TMP97:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_27]], align 4 -// CHECK9-NEXT: [[SUB29:%.*]] = sub nsw i32 [[TMP97]], 0 -// CHECK9-NEXT: [[DIV30:%.*]] = sdiv i32 [[SUB29]], 1 -// CHECK9-NEXT: [[SUB31:%.*]] = sub nsw i32 [[DIV30]], 1 -// CHECK9-NEXT: store i32 [[SUB31]], i32* [[DOTCAPTURE_EXPR_28]], align 4 -// CHECK9-NEXT: [[TMP98:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_28]], align 4 -// CHECK9-NEXT: [[ADD32:%.*]] = add nsw i32 [[TMP98]], 1 -// CHECK9-NEXT: [[TMP99:%.*]] = zext i32 [[ADD32]] to i64 -// CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP99]]) -// CHECK9-NEXT: [[TMP100:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.region_id, i32 4, i8** [[TMP93]], i8** [[TMP94]], i64* [[TMP95]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) -// CHECK9-NEXT: [[TMP101:%.*]] = icmp ne i32 [[TMP100]], 0 -// CHECK9-NEXT: br i1 [[TMP101]], label [[OMP_OFFLOAD_FAILED33:%.*]], label 
[[OMP_OFFLOAD_CONT34:%.*]] -// CHECK9: omp_offload.failed33: -// CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102(i64 [[TMP65]], i64 [[TMP1]], i32* [[VLA]], i64 [[TMP67]]) #[[ATTR4]] -// CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT34]] -// CHECK9: omp_offload.cont34: -// CHECK9-NEXT: [[TMP102:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 -// CHECK9-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP102]]) +// CHECK9-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP83:%.*]] = load i32, i32* [[N]], align 4 +// CHECK9-NEXT: store i32 [[TMP83]], i32* [[DOTCAPTURE_EXPR_25]], align 4 +// CHECK9-NEXT: [[TMP84:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_25]], align 4 +// CHECK9-NEXT: [[SUB27:%.*]] = sub nsw i32 [[TMP84]], 0 +// CHECK9-NEXT: [[DIV28:%.*]] = sdiv i32 [[SUB27]], 1 +// CHECK9-NEXT: [[SUB29:%.*]] = sub nsw i32 [[DIV28]], 1 +// CHECK9-NEXT: store i32 [[SUB29]], i32* [[DOTCAPTURE_EXPR_26]], align 4 +// CHECK9-NEXT: [[TMP85:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_26]], align 4 +// CHECK9-NEXT: [[ADD30:%.*]] = add nsw i32 [[TMP85]], 1 +// CHECK9-NEXT: [[TMP86:%.*]] = zext i32 [[ADD30]] to i64 +// CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP86]]) +// CHECK9-NEXT: [[TMP87:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.region_id, i32 4, i8** [[TMP81]], i8** [[TMP82]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.5, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.6, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK9-NEXT: [[TMP88:%.*]] = icmp ne i32 [[TMP87]], 0 +// CHECK9-NEXT: br i1 [[TMP88]], label [[OMP_OFFLOAD_FAILED31:%.*]], label [[OMP_OFFLOAD_CONT32:%.*]] +// CHECK9: omp_offload.failed31: +// CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102(i64 [[TMP57]], i64 [[TMP1]], i32* [[VLA]], i64 [[TMP59]]) #[[ATTR4]] +// CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT32]] +// CHECK9: omp_offload.cont32: +// CHECK9-NEXT: [[TMP89:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 +// CHECK9-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP89]]) // CHECK9-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK9-NEXT: [[TMP103:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK9-NEXT: call void @llvm.stackrestore(i8* [[TMP103]]) -// CHECK9-NEXT: [[TMP104:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK9-NEXT: ret i32 [[TMP104]] +// CHECK9-NEXT: [[TMP90:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK9-NEXT: call void @llvm.stackrestore(i8* [[TMP90]]) +// CHECK9-NEXT: [[TMP91:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK9-NEXT: ret i32 [[TMP91]] // // // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l94 @@ -2601,11 +2578,11 @@ // CHECK9-NEXT: [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK9-NEXT: store i32 [[TMP4]], i32* [[CONV3]], align 4 // CHECK9-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP5]]) +// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP5]]) // CHECK9-NEXT: ret void // // -// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..3 +// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..4 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] { // CHECK9-NEXT: entry: // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -2762,7 +2739,7 @@ // CHECK9-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK9-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK9-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l76.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.6, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK9-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l76.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) // CHECK9-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK9-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK9: omp_offload.failed: @@ -2780,7 +2757,7 @@ // CHECK9-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0 // CHECK9-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0 // CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK9-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l80.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK9-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l80.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.11, i32 0, i32 0), i64* 
getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) // CHECK9-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 // CHECK9-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]] // CHECK9: omp_offload.failed5: @@ -2798,7 +2775,7 @@ // CHECK9-NEXT: [[TMP23:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0 // CHECK9-NEXT: [[TMP24:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0 // CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK9-NEXT: [[TMP25:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l84.region_id, i32 1, i8** [[TMP23]], i8** [[TMP24]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.11, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK9-NEXT: [[TMP25:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l84.region_id, i32 1, i8** [[TMP23]], i8** [[TMP24]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.14, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.15, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) // CHECK9-NEXT: [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0 // CHECK9-NEXT: br i1 [[TMP26]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]] // CHECK9: omp_offload.failed11: @@ -2814,11 +2791,11 @@ // CHECK9-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK9-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 // CHECK9-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 -// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..5 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..7 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK9-NEXT: ret void // // -// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..5 +// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..7 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR3]] { // CHECK9-NEXT: entry: // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -2898,11 +2875,11 @@ // CHECK9-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK9-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 // CHECK9-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 -// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..7 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK9-NEXT: ret void // // -// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..7 +// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..10 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR3]] { // CHECK9-NEXT: entry: // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -2982,11 +2959,11 @@ // CHECK9-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK9-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 // CHECK9-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 -// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..13 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK9-NEXT: ret void // // -// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..10 +// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..13 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR3]] { // CHECK9-NEXT: entry: // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -3097,7 +3074,6 @@ // CHECK10-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK10-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK10-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8 -// CHECK10-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 8 // CHECK10-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK10-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK10-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -3105,20 +3081,18 @@ // CHECK10-NEXT: [[DOTOFFLOAD_BASEPTRS5:%.*]] = alloca [3 x i8*], align 8 // CHECK10-NEXT: [[DOTOFFLOAD_PTRS6:%.*]] = alloca [3 x i8*], align 8 // CHECK10-NEXT: [[DOTOFFLOAD_MAPPERS7:%.*]] = alloca [3 x i8*], align 8 -// CHECK10-NEXT: [[DOTOFFLOAD_SIZES8:%.*]] = alloca [3 x i64], align 8 -// CHECK10-NEXT: [[_TMP9:%.*]] = alloca i32, align 4 +// CHECK10-NEXT: [[_TMP8:%.*]] = alloca i32, align 4 +// CHECK10-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4 // CHECK10-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4 -// CHECK10-NEXT: [[DOTCAPTURE_EXPR_11:%.*]] = alloca i32, align 4 -// CHECK10-NEXT: [[DOTCAPTURE_EXPR_18:%.*]] = alloca i32, align 4 -// CHECK10-NEXT: [[N_CASTED19:%.*]] = alloca i64, align 8 +// CHECK10-NEXT: [[DOTCAPTURE_EXPR_17:%.*]] = alloca i32, align 4 +// CHECK10-NEXT: [[N_CASTED18:%.*]] = alloca i64, align 8 // CHECK10-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i64, align 8 -// CHECK10-NEXT: [[DOTOFFLOAD_BASEPTRS22:%.*]] = alloca [4 x i8*], align 8 -// CHECK10-NEXT: [[DOTOFFLOAD_PTRS23:%.*]] = alloca [4 x i8*], align 8 -// CHECK10-NEXT: [[DOTOFFLOAD_MAPPERS24:%.*]] = alloca [4 x i8*], align 8 -// CHECK10-NEXT: [[DOTOFFLOAD_SIZES25:%.*]] = alloca [4 x i64], align 8 -// CHECK10-NEXT: 
[[_TMP26:%.*]] = alloca i32, align 4 -// CHECK10-NEXT: [[DOTCAPTURE_EXPR_27:%.*]] = alloca i32, align 4 -// CHECK10-NEXT: [[DOTCAPTURE_EXPR_28:%.*]] = alloca i32, align 4 +// CHECK10-NEXT: [[DOTOFFLOAD_BASEPTRS21:%.*]] = alloca [4 x i8*], align 8 +// CHECK10-NEXT: [[DOTOFFLOAD_PTRS22:%.*]] = alloca [4 x i8*], align 8 +// CHECK10-NEXT: [[DOTOFFLOAD_MAPPERS23:%.*]] = alloca [4 x i8*], align 8 +// CHECK10-NEXT: [[_TMP24:%.*]] = alloca i32, align 4 +// CHECK10-NEXT: [[DOTCAPTURE_EXPR_25:%.*]] = alloca i32, align 4 +// CHECK10-NEXT: [[DOTCAPTURE_EXPR_26:%.*]] = alloca i32, align 4 // CHECK10-NEXT: store i32 0, i32* [[RETVAL]], align 4 // CHECK10-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 // CHECK10-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 8 @@ -3140,186 +3114,166 @@ // CHECK10-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK10-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i64* // CHECK10-NEXT: store i64 [[TMP4]], i64* [[TMP9]], align 8 -// CHECK10-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK10-NEXT: store i64 4, i64* [[TMP10]], align 8 -// CHECK10-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK10-NEXT: store i8* null, i8** [[TMP11]], align 8 -// CHECK10-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK10-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64* -// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP13]], align 8 -// CHECK10-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK10-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64* -// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP15]], align 8 -// CHECK10-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK10-NEXT: store i64 8, i64* [[TMP16]], align 8 -// CHECK10-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK10-NEXT: store i8* null, i8** [[TMP17]], align 8 -// CHECK10-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK10-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK10-NEXT: store i8* null, i8** [[TMP10]], align 8 +// CHECK10-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK10-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i64* +// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP12]], align 8 +// CHECK10-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK10-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i64* +// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP14]], align 8 +// CHECK10-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK10-NEXT: store i8* null, i8** [[TMP15]], align 8 +// CHECK10-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK10-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** +// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 8 +// CHECK10-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK10-NEXT: [[TMP19:%.*]] = bitcast i8** 
[[TMP18]] to i32** // CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 8 -// CHECK10-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK10-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** -// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 8 -// CHECK10-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK10-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 8 -// CHECK10-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK10-NEXT: store i8* null, i8** [[TMP23]], align 8 -// CHECK10-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4 -// CHECK10-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK10-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK10-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 +// CHECK10-NEXT: store i64 [[TMP5]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 2), align 8 +// CHECK10-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK10-NEXT: store i8* null, i8** [[TMP20]], align 8 +// CHECK10-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP23:%.*]] = load i32, i32* [[N]], align 4 +// CHECK10-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK10-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK10-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK10-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK10-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK10-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK10-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK10-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1 -// CHECK10-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 -// CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[TMP30]]) -// CHECK10-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l94.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) -// CHECK10-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 -// CHECK10-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK10-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK10-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], 1 +// CHECK10-NEXT: [[TMP26:%.*]] = zext i32 [[ADD]] to i64 +// CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[TMP26]]) +// CHECK10-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* 
@.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l94.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK10-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK10-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK10: omp_offload.failed: // CHECK10-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l94(i64 [[TMP4]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR4:[0-9]+]] // CHECK10-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK10: omp_offload.cont: -// CHECK10-NEXT: [[TMP33:%.*]] = load i32, i32* [[N]], align 4 +// CHECK10-NEXT: [[TMP29:%.*]] = load i32, i32* [[N]], align 4 // CHECK10-NEXT: [[CONV4:%.*]] = bitcast i64* [[N_CASTED3]] to i32* -// CHECK10-NEXT: store i32 [[TMP33]], i32* [[CONV4]], align 4 -// CHECK10-NEXT: [[TMP34:%.*]] = load i64, i64* [[N_CASTED3]], align 8 -// CHECK10-NEXT: [[TMP35:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK10-NEXT: [[TMP36:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i64* -// CHECK10-NEXT: store i64 [[TMP34]], i64* [[TMP37]], align 8 -// CHECK10-NEXT: [[TMP38:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i64* -// CHECK10-NEXT: store i64 [[TMP34]], i64* [[TMP39]], align 8 -// CHECK10-NEXT: [[TMP40:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 0 -// CHECK10-NEXT: store i64 4, i64* [[TMP40]], align 8 -// CHECK10-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 0 +// CHECK10-NEXT: store i32 [[TMP29]], i32* [[CONV4]], align 4 +// CHECK10-NEXT: [[TMP30:%.*]] = load i64, i64* [[N_CASTED3]], align 8 +// CHECK10-NEXT: [[TMP31:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK10-NEXT: [[TMP32:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i64* +// CHECK10-NEXT: store i64 [[TMP30]], i64* [[TMP33]], align 8 +// CHECK10-NEXT: [[TMP34:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i64* +// CHECK10-NEXT: store i64 [[TMP30]], i64* [[TMP35]], align 8 +// CHECK10-NEXT: [[TMP36:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 0 +// CHECK10-NEXT: store i8* null, i8** [[TMP36]], align 8 +// CHECK10-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1 +// CHECK10-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i64* +// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP38]], align 8 +// CHECK10-NEXT: [[TMP39:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 1 +// CHECK10-NEXT: [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i64* +// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP40]], align 8 +// CHECK10-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 1 // CHECK10-NEXT: store i8* null, i8** [[TMP41]], align 8 -// CHECK10-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1 -// CHECK10-NEXT: [[TMP43:%.*]] = bitcast 
i8** [[TMP42]] to i64* -// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP43]], align 8 -// CHECK10-NEXT: [[TMP44:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 1 -// CHECK10-NEXT: [[TMP45:%.*]] = bitcast i8** [[TMP44]] to i64* -// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP45]], align 8 -// CHECK10-NEXT: [[TMP46:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 1 -// CHECK10-NEXT: store i64 8, i64* [[TMP46]], align 8 -// CHECK10-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 1 -// CHECK10-NEXT: store i8* null, i8** [[TMP47]], align 8 -// CHECK10-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 2 -// CHECK10-NEXT: [[TMP49:%.*]] = bitcast i8** [[TMP48]] to i32** -// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP49]], align 8 -// CHECK10-NEXT: [[TMP50:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 2 -// CHECK10-NEXT: [[TMP51:%.*]] = bitcast i8** [[TMP50]] to i32** -// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP51]], align 8 -// CHECK10-NEXT: [[TMP52:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 2 -// CHECK10-NEXT: store i64 [[TMP35]], i64* [[TMP52]], align 8 -// CHECK10-NEXT: [[TMP53:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 2 -// CHECK10-NEXT: store i8* null, i8** [[TMP53]], align 8 -// CHECK10-NEXT: [[TMP54:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP55:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP56:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP57:%.*]] = load i32, i32* [[N]], align 4 -// CHECK10-NEXT: store i32 [[TMP57]], i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK10-NEXT: [[TMP58:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK10-NEXT: [[SUB12:%.*]] = sub nsw i32 [[TMP58]], 0 -// CHECK10-NEXT: [[DIV13:%.*]] = sdiv i32 [[SUB12]], 1 -// CHECK10-NEXT: [[SUB14:%.*]] = sub nsw i32 [[DIV13]], 1 -// CHECK10-NEXT: store i32 [[SUB14]], i32* [[DOTCAPTURE_EXPR_11]], align 4 -// CHECK10-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_11]], align 4 -// CHECK10-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP59]], 1 -// CHECK10-NEXT: [[TMP60:%.*]] = zext i32 [[ADD15]] to i64 -// CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP60]]) -// CHECK10-NEXT: [[TMP61:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l98.region_id, i32 3, i8** [[TMP54]], i8** [[TMP55]], i64* [[TMP56]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.2, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) -// CHECK10-NEXT: [[TMP62:%.*]] = icmp ne i32 [[TMP61]], 0 -// CHECK10-NEXT: br i1 [[TMP62]], label [[OMP_OFFLOAD_FAILED16:%.*]], label [[OMP_OFFLOAD_CONT17:%.*]] -// CHECK10: omp_offload.failed16: -// CHECK10-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l98(i64 [[TMP34]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR4]] -// CHECK10-NEXT: br label [[OMP_OFFLOAD_CONT17]] -// CHECK10: omp_offload.cont17: -// CHECK10-NEXT: [[TMP63:%.*]] = load i32, i32* [[N]], align 4 -// CHECK10-NEXT: store i32 [[TMP63]], i32* [[DOTCAPTURE_EXPR_18]], align 
4 -// CHECK10-NEXT: [[TMP64:%.*]] = load i32, i32* [[N]], align 4 -// CHECK10-NEXT: [[CONV20:%.*]] = bitcast i64* [[N_CASTED19]] to i32* -// CHECK10-NEXT: store i32 [[TMP64]], i32* [[CONV20]], align 4 -// CHECK10-NEXT: [[TMP65:%.*]] = load i64, i64* [[N_CASTED19]], align 8 -// CHECK10-NEXT: [[TMP66:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_18]], align 4 -// CHECK10-NEXT: [[CONV21:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* -// CHECK10-NEXT: store i32 [[TMP66]], i32* [[CONV21]], align 4 -// CHECK10-NEXT: [[TMP67:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK10-NEXT: [[TMP68:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK10-NEXT: [[TMP69:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP70:%.*]] = bitcast i8** [[TMP69]] to i64* -// CHECK10-NEXT: store i64 [[TMP65]], i64* [[TMP70]], align 8 -// CHECK10-NEXT: [[TMP71:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP72:%.*]] = bitcast i8** [[TMP71]] to i64* -// CHECK10-NEXT: store i64 [[TMP65]], i64* [[TMP72]], align 8 -// CHECK10-NEXT: [[TMP73:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 0 -// CHECK10-NEXT: store i64 4, i64* [[TMP73]], align 8 -// CHECK10-NEXT: [[TMP74:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 0 -// CHECK10-NEXT: store i8* null, i8** [[TMP74]], align 8 -// CHECK10-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 1 -// CHECK10-NEXT: [[TMP76:%.*]] = bitcast i8** [[TMP75]] to i64* -// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP76]], align 8 -// CHECK10-NEXT: [[TMP77:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 1 -// CHECK10-NEXT: [[TMP78:%.*]] = bitcast i8** [[TMP77]] to i64* -// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP78]], align 8 -// CHECK10-NEXT: [[TMP79:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 1 -// CHECK10-NEXT: store i64 8, i64* [[TMP79]], align 8 -// CHECK10-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 1 +// CHECK10-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 2 +// CHECK10-NEXT: [[TMP43:%.*]] = bitcast i8** [[TMP42]] to i32** +// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP43]], align 8 +// CHECK10-NEXT: [[TMP44:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 2 +// CHECK10-NEXT: [[TMP45:%.*]] = bitcast i8** [[TMP44]] to i32** +// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP45]], align 8 +// CHECK10-NEXT: store i64 [[TMP31]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.2, i32 0, i32 2), align 8 +// CHECK10-NEXT: [[TMP46:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 2 +// CHECK10-NEXT: store i8* null, i8** [[TMP46]], align 8 +// CHECK10-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP49:%.*]] = load i32, i32* [[N]], align 4 +// CHECK10-NEXT: store i32 [[TMP49]], i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK10-NEXT: [[TMP50:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK10-NEXT: [[SUB11:%.*]] = sub nsw i32 
[[TMP50]], 0 +// CHECK10-NEXT: [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1 +// CHECK10-NEXT: [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1 +// CHECK10-NEXT: store i32 [[SUB13]], i32* [[DOTCAPTURE_EXPR_10]], align 4 +// CHECK10-NEXT: [[TMP51:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 +// CHECK10-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP51]], 1 +// CHECK10-NEXT: [[TMP52:%.*]] = zext i32 [[ADD14]] to i64 +// CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP52]]) +// CHECK10-NEXT: [[TMP53:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l98.region_id, i32 3, i8** [[TMP47]], i8** [[TMP48]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.2, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.3, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK10-NEXT: [[TMP54:%.*]] = icmp ne i32 [[TMP53]], 0 +// CHECK10-NEXT: br i1 [[TMP54]], label [[OMP_OFFLOAD_FAILED15:%.*]], label [[OMP_OFFLOAD_CONT16:%.*]] +// CHECK10: omp_offload.failed15: +// CHECK10-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l98(i64 [[TMP30]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR4]] +// CHECK10-NEXT: br label [[OMP_OFFLOAD_CONT16]] +// CHECK10: omp_offload.cont16: +// CHECK10-NEXT: [[TMP55:%.*]] = load i32, i32* [[N]], align 4 +// CHECK10-NEXT: store i32 [[TMP55]], i32* [[DOTCAPTURE_EXPR_17]], align 4 +// CHECK10-NEXT: [[TMP56:%.*]] = load i32, i32* [[N]], align 4 +// CHECK10-NEXT: [[CONV19:%.*]] = bitcast i64* [[N_CASTED18]] to i32* +// CHECK10-NEXT: store i32 [[TMP56]], i32* [[CONV19]], align 4 +// CHECK10-NEXT: [[TMP57:%.*]] = load i64, i64* [[N_CASTED18]], align 8 +// CHECK10-NEXT: [[TMP58:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_17]], align 4 +// CHECK10-NEXT: [[CONV20:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* +// CHECK10-NEXT: store i32 [[TMP58]], i32* [[CONV20]], align 4 +// CHECK10-NEXT: [[TMP59:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 +// CHECK10-NEXT: [[TMP60:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK10-NEXT: [[TMP61:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP62:%.*]] = bitcast i8** [[TMP61]] to i64* +// CHECK10-NEXT: store i64 [[TMP57]], i64* [[TMP62]], align 8 +// CHECK10-NEXT: [[TMP63:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP64:%.*]] = bitcast i8** [[TMP63]] to i64* +// CHECK10-NEXT: store i64 [[TMP57]], i64* [[TMP64]], align 8 +// CHECK10-NEXT: [[TMP65:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 0 +// CHECK10-NEXT: store i8* null, i8** [[TMP65]], align 8 +// CHECK10-NEXT: [[TMP66:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 1 +// CHECK10-NEXT: [[TMP67:%.*]] = bitcast i8** [[TMP66]] to i64* +// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP67]], align 8 +// CHECK10-NEXT: [[TMP68:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 1 +// CHECK10-NEXT: [[TMP69:%.*]] = bitcast i8** [[TMP68]] to i64* +// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP69]], align 8 +// CHECK10-NEXT: [[TMP70:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 1 +// CHECK10-NEXT: store i8* null, i8** [[TMP70]], align 8 +// CHECK10-NEXT: [[TMP71:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* 
[[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 2 +// CHECK10-NEXT: [[TMP72:%.*]] = bitcast i8** [[TMP71]] to i32** +// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP72]], align 8 +// CHECK10-NEXT: [[TMP73:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 2 +// CHECK10-NEXT: [[TMP74:%.*]] = bitcast i8** [[TMP73]] to i32** +// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP74]], align 8 +// CHECK10-NEXT: store i64 [[TMP60]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.5, i32 0, i32 2), align 8 +// CHECK10-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 2 +// CHECK10-NEXT: store i8* null, i8** [[TMP75]], align 8 +// CHECK10-NEXT: [[TMP76:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 3 +// CHECK10-NEXT: [[TMP77:%.*]] = bitcast i8** [[TMP76]] to i64* +// CHECK10-NEXT: store i64 [[TMP59]], i64* [[TMP77]], align 8 +// CHECK10-NEXT: [[TMP78:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 3 +// CHECK10-NEXT: [[TMP79:%.*]] = bitcast i8** [[TMP78]] to i64* +// CHECK10-NEXT: store i64 [[TMP59]], i64* [[TMP79]], align 8 +// CHECK10-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 3 // CHECK10-NEXT: store i8* null, i8** [[TMP80]], align 8 -// CHECK10-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 2 -// CHECK10-NEXT: [[TMP82:%.*]] = bitcast i8** [[TMP81]] to i32** -// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP82]], align 8 -// CHECK10-NEXT: [[TMP83:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 2 -// CHECK10-NEXT: [[TMP84:%.*]] = bitcast i8** [[TMP83]] to i32** -// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP84]], align 8 -// CHECK10-NEXT: [[TMP85:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 2 -// CHECK10-NEXT: store i64 [[TMP68]], i64* [[TMP85]], align 8 -// CHECK10-NEXT: [[TMP86:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 2 -// CHECK10-NEXT: store i8* null, i8** [[TMP86]], align 8 -// CHECK10-NEXT: [[TMP87:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 3 -// CHECK10-NEXT: [[TMP88:%.*]] = bitcast i8** [[TMP87]] to i64* -// CHECK10-NEXT: store i64 [[TMP67]], i64* [[TMP88]], align 8 -// CHECK10-NEXT: [[TMP89:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 3 -// CHECK10-NEXT: [[TMP90:%.*]] = bitcast i8** [[TMP89]] to i64* -// CHECK10-NEXT: store i64 [[TMP67]], i64* [[TMP90]], align 8 -// CHECK10-NEXT: [[TMP91:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 3 -// CHECK10-NEXT: store i64 4, i64* [[TMP91]], align 8 -// CHECK10-NEXT: [[TMP92:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS24]], i64 0, i64 3 -// CHECK10-NEXT: store i8* null, i8** [[TMP92]], align 8 -// CHECK10-NEXT: [[TMP93:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS22]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP94:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS23]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP95:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES25]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP96:%.*]] = load i32, i32* [[N]], align 4 -// CHECK10-NEXT: store i32 [[TMP96]], i32* [[DOTCAPTURE_EXPR_27]], 
align 4 -// CHECK10-NEXT: [[TMP97:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_27]], align 4 -// CHECK10-NEXT: [[SUB29:%.*]] = sub nsw i32 [[TMP97]], 0 -// CHECK10-NEXT: [[DIV30:%.*]] = sdiv i32 [[SUB29]], 1 -// CHECK10-NEXT: [[SUB31:%.*]] = sub nsw i32 [[DIV30]], 1 -// CHECK10-NEXT: store i32 [[SUB31]], i32* [[DOTCAPTURE_EXPR_28]], align 4 -// CHECK10-NEXT: [[TMP98:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_28]], align 4 -// CHECK10-NEXT: [[ADD32:%.*]] = add nsw i32 [[TMP98]], 1 -// CHECK10-NEXT: [[TMP99:%.*]] = zext i32 [[ADD32]] to i64 -// CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP99]]) -// CHECK10-NEXT: [[TMP100:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.region_id, i32 4, i8** [[TMP93]], i8** [[TMP94]], i64* [[TMP95]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) -// CHECK10-NEXT: [[TMP101:%.*]] = icmp ne i32 [[TMP100]], 0 -// CHECK10-NEXT: br i1 [[TMP101]], label [[OMP_OFFLOAD_FAILED33:%.*]], label [[OMP_OFFLOAD_CONT34:%.*]] -// CHECK10: omp_offload.failed33: -// CHECK10-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102(i64 [[TMP65]], i64 [[TMP1]], i32* [[VLA]], i64 [[TMP67]]) #[[ATTR4]] -// CHECK10-NEXT: br label [[OMP_OFFLOAD_CONT34]] -// CHECK10: omp_offload.cont34: -// CHECK10-NEXT: [[TMP102:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 -// CHECK10-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP102]]) +// CHECK10-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP83:%.*]] = load i32, i32* [[N]], align 4 +// CHECK10-NEXT: store i32 [[TMP83]], i32* [[DOTCAPTURE_EXPR_25]], align 4 +// CHECK10-NEXT: [[TMP84:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_25]], align 4 +// CHECK10-NEXT: [[SUB27:%.*]] = sub nsw i32 [[TMP84]], 0 +// CHECK10-NEXT: [[DIV28:%.*]] = sdiv i32 [[SUB27]], 1 +// CHECK10-NEXT: [[SUB29:%.*]] = sub nsw i32 [[DIV28]], 1 +// CHECK10-NEXT: store i32 [[SUB29]], i32* [[DOTCAPTURE_EXPR_26]], align 4 +// CHECK10-NEXT: [[TMP85:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_26]], align 4 +// CHECK10-NEXT: [[ADD30:%.*]] = add nsw i32 [[TMP85]], 1 +// CHECK10-NEXT: [[TMP86:%.*]] = zext i32 [[ADD30]] to i64 +// CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP86]]) +// CHECK10-NEXT: [[TMP87:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.region_id, i32 4, i8** [[TMP81]], i8** [[TMP82]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.5, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.6, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK10-NEXT: [[TMP88:%.*]] = icmp ne i32 [[TMP87]], 0 +// CHECK10-NEXT: br i1 [[TMP88]], label [[OMP_OFFLOAD_FAILED31:%.*]], label [[OMP_OFFLOAD_CONT32:%.*]] +// CHECK10: omp_offload.failed31: +// CHECK10-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102(i64 [[TMP57]], i64 [[TMP1]], i32* [[VLA]], i64 [[TMP59]]) #[[ATTR4]] +// CHECK10-NEXT: br label [[OMP_OFFLOAD_CONT32]] +// CHECK10: omp_offload.cont32: +// CHECK10-NEXT: [[TMP89:%.*]] = load i32, i32* 
[[ARGC_ADDR]], align 4 +// CHECK10-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP89]]) // CHECK10-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK10-NEXT: [[TMP103:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK10-NEXT: call void @llvm.stackrestore(i8* [[TMP103]]) -// CHECK10-NEXT: [[TMP104:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK10-NEXT: ret i32 [[TMP104]] +// CHECK10-NEXT: [[TMP90:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK10-NEXT: call void @llvm.stackrestore(i8* [[TMP90]]) +// CHECK10-NEXT: [[TMP91:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK10-NEXT: ret i32 [[TMP91]] // // // CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l94 @@ -3603,11 +3557,11 @@ // CHECK10-NEXT: [[CONV3:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK10-NEXT: store i32 [[TMP4]], i32* [[CONV3]], align 4 // CHECK10-NEXT: [[TMP5:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP5]]) +// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i64 [[TMP3]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP5]]) // CHECK10-NEXT: ret void // // -// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..3 +// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..4 // CHECK10-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] { // CHECK10-NEXT: entry: // CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -3764,7 +3718,7 @@ // CHECK10-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK10-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK10-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l76.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.6, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK10-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l76.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) // CHECK10-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK10-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK10: omp_offload.failed: 
@@ -3782,7 +3736,7 @@ // CHECK10-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0 // CHECK10-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0 // CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK10-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l80.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK10-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l80.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.11, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) // CHECK10-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 // CHECK10-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]] // CHECK10: omp_offload.failed5: @@ -3800,7 +3754,7 @@ // CHECK10-NEXT: [[TMP23:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0 // CHECK10-NEXT: [[TMP24:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0 // CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK10-NEXT: [[TMP25:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l84.region_id, i32 1, i8** [[TMP23]], i8** [[TMP24]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.11, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK10-NEXT: [[TMP25:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l84.region_id, i32 1, i8** [[TMP23]], i8** [[TMP24]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.14, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.15, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) // CHECK10-NEXT: [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0 // CHECK10-NEXT: br i1 [[TMP26]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]] // CHECK10: omp_offload.failed11: @@ -3816,11 +3770,11 @@ // CHECK10-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK10-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 // CHECK10-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 -// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..5 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..7 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK10-NEXT: ret void // // -// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..5 +// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..7 // CHECK10-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR3]] { // CHECK10-NEXT: entry: // CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -3900,11 +3854,11 @@ // CHECK10-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK10-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 // CHECK10-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 -// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..7 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK10-NEXT: ret void // // -// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..7 +// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..10 // CHECK10-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR3]] { // CHECK10-NEXT: entry: // CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -3984,11 +3938,11 @@ // CHECK10-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK10-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 // CHECK10-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 -// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..13 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK10-NEXT: ret void // // -// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..10 +// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..13 // CHECK10-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR3]] { // CHECK10-NEXT: entry: // CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -4099,7 +4053,6 @@ // CHECK11-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK11-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK11-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4 -// CHECK11-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 4 // CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -4107,20 +4060,18 @@ // CHECK11-NEXT: [[DOTOFFLOAD_BASEPTRS4:%.*]] = alloca [3 x i8*], align 4 // CHECK11-NEXT: [[DOTOFFLOAD_PTRS5:%.*]] = alloca [3 x i8*], align 4 // CHECK11-NEXT: [[DOTOFFLOAD_MAPPERS6:%.*]] = alloca [3 x i8*], align 4 -// CHECK11-NEXT: [[DOTOFFLOAD_SIZES7:%.*]] = alloca [3 x i64], align 4 -// CHECK11-NEXT: [[_TMP8:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[_TMP7:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTCAPTURE_EXPR_8:%.*]] = alloca i32, align 4 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTCAPTURE_EXPR_17:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[N_CASTED18:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTCAPTURE_EXPR_16:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[N_CASTED17:%.*]] = alloca i32, align 4 // CHECK11-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOFFLOAD_BASEPTRS19:%.*]] = alloca [4 x i8*], align 4 -// CHECK11-NEXT: [[DOTOFFLOAD_PTRS20:%.*]] = alloca [4 x i8*], align 4 -// CHECK11-NEXT: [[DOTOFFLOAD_MAPPERS21:%.*]] = alloca [4 x i8*], align 4 -// CHECK11-NEXT: [[DOTOFFLOAD_SIZES22:%.*]] = alloca [4 x i64], align 4 -// CHECK11-NEXT: [[_TMP23:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTCAPTURE_EXPR_24:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTCAPTURE_EXPR_25:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOFFLOAD_BASEPTRS18:%.*]] = alloca [4 x i8*], align 4 +// CHECK11-NEXT: [[DOTOFFLOAD_PTRS19:%.*]] = alloca [4 x i8*], align 4 +// CHECK11-NEXT: [[DOTOFFLOAD_MAPPERS20:%.*]] = alloca [4 x i8*], align 4 +// CHECK11-NEXT: [[_TMP21:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTCAPTURE_EXPR_22:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTCAPTURE_EXPR_23:%.*]] = alloca i32, align 4 // CHECK11-NEXT: store i32 0, i32* [[RETVAL]], align 4 // CHECK11-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 // CHECK11-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 4 @@ -4141,185 +4092,165 @@ // CHECK11-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK11-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i32* // CHECK11-NEXT: store i32 [[TMP3]], i32* [[TMP9]], align 4 -// CHECK11-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK11-NEXT: store i64 4, i64* [[TMP10]], align 4 -// 
CHECK11-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK11-NEXT: store i8* null, i8** [[TMP11]], align 4 -// CHECK11-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK11-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32* -// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP13]], align 4 -// CHECK11-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK11-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32* -// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP15]], align 4 -// CHECK11-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK11-NEXT: store i64 4, i64* [[TMP16]], align 4 -// CHECK11-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK11-NEXT: store i8* null, i8** [[TMP17]], align 4 -// CHECK11-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK11-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK11-NEXT: store i8* null, i8** [[TMP10]], align 4 +// CHECK11-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK11-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i32* +// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP12]], align 4 +// CHECK11-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK11-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i32* +// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP14]], align 4 +// CHECK11-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK11-NEXT: store i8* null, i8** [[TMP15]], align 4 +// CHECK11-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK11-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** +// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 4 +// CHECK11-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK11-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** // CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 4 -// CHECK11-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK11-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** -// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 4 -// CHECK11-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK11-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 4 -// CHECK11-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK11-NEXT: store i8* null, i8** [[TMP23]], align 4 -// CHECK11-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4 -// CHECK11-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK11-NEXT: [[TMP28:%.*]] = load i32, i32* 
[[DOTCAPTURE_EXPR_]], align 4 -// CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 +// CHECK11-NEXT: store i64 [[TMP5]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 2), align 4 +// CHECK11-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK11-NEXT: store i8* null, i8** [[TMP20]], align 4 +// CHECK11-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP23:%.*]] = load i32, i32* [[N]], align 4 +// CHECK11-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK11-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK11-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK11-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK11-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK11-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1 -// CHECK11-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 -// CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[TMP30]]) -// CHECK11-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l94.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) -// CHECK11-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 -// CHECK11-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK11-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], 1 +// CHECK11-NEXT: [[TMP26:%.*]] = zext i32 [[ADD]] to i64 +// CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[TMP26]]) +// CHECK11-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l94.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK11-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK11-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK11: omp_offload.failed: // CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l94(i32 [[TMP3]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR4:[0-9]+]] // CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK11: omp_offload.cont: -// CHECK11-NEXT: [[TMP33:%.*]] = load i32, i32* [[N]], align 4 -// CHECK11-NEXT: store i32 [[TMP33]], i32* [[N_CASTED3]], align 4 -// CHECK11-NEXT: [[TMP34:%.*]] = load i32, i32* [[N_CASTED3]], align 4 -// CHECK11-NEXT: [[TMP35:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK11-NEXT: [[TMP36:%.*]] = sext i32 [[TMP35]] to i64 -// CHECK11-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i32* -// 
CHECK11-NEXT: store i32 [[TMP34]], i32* [[TMP38]], align 4 -// CHECK11-NEXT: [[TMP39:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i32* -// CHECK11-NEXT: store i32 [[TMP34]], i32* [[TMP40]], align 4 -// CHECK11-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 0 -// CHECK11-NEXT: store i64 4, i64* [[TMP41]], align 4 -// CHECK11-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP29:%.*]] = load i32, i32* [[N]], align 4 +// CHECK11-NEXT: store i32 [[TMP29]], i32* [[N_CASTED3]], align 4 +// CHECK11-NEXT: [[TMP30:%.*]] = load i32, i32* [[N_CASTED3]], align 4 +// CHECK11-NEXT: [[TMP31:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK11-NEXT: [[TMP32:%.*]] = sext i32 [[TMP31]] to i64 +// CHECK11-NEXT: [[TMP33:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i32* +// CHECK11-NEXT: store i32 [[TMP30]], i32* [[TMP34]], align 4 +// CHECK11-NEXT: [[TMP35:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP36:%.*]] = bitcast i8** [[TMP35]] to i32* +// CHECK11-NEXT: store i32 [[TMP30]], i32* [[TMP36]], align 4 +// CHECK11-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0 +// CHECK11-NEXT: store i8* null, i8** [[TMP37]], align 4 +// CHECK11-NEXT: [[TMP38:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1 +// CHECK11-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i32* +// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP39]], align 4 +// CHECK11-NEXT: [[TMP40:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 1 +// CHECK11-NEXT: [[TMP41:%.*]] = bitcast i8** [[TMP40]] to i32* +// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP41]], align 4 +// CHECK11-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1 // CHECK11-NEXT: store i8* null, i8** [[TMP42]], align 4 -// CHECK11-NEXT: [[TMP43:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1 -// CHECK11-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32* -// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP44]], align 4 -// CHECK11-NEXT: [[TMP45:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 1 -// CHECK11-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32* -// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP46]], align 4 -// CHECK11-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 1 -// CHECK11-NEXT: store i64 4, i64* [[TMP47]], align 4 -// CHECK11-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1 -// CHECK11-NEXT: store i8* null, i8** [[TMP48]], align 4 -// CHECK11-NEXT: [[TMP49:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2 -// CHECK11-NEXT: [[TMP50:%.*]] = bitcast i8** [[TMP49]] to i32** -// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP50]], align 4 -// CHECK11-NEXT: [[TMP51:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 2 -// CHECK11-NEXT: [[TMP52:%.*]] = bitcast i8** [[TMP51]] to i32** -// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP52]], align 4 
-// CHECK11-NEXT: [[TMP53:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 2 -// CHECK11-NEXT: store i64 [[TMP36]], i64* [[TMP53]], align 4 -// CHECK11-NEXT: [[TMP54:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 2 -// CHECK11-NEXT: store i8* null, i8** [[TMP54]], align 4 -// CHECK11-NEXT: [[TMP55:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP56:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP57:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP58:%.*]] = load i32, i32* [[N]], align 4 -// CHECK11-NEXT: store i32 [[TMP58]], i32* [[DOTCAPTURE_EXPR_9]], align 4 -// CHECK11-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 -// CHECK11-NEXT: [[SUB11:%.*]] = sub nsw i32 [[TMP59]], 0 -// CHECK11-NEXT: [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1 -// CHECK11-NEXT: [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1 -// CHECK11-NEXT: store i32 [[SUB13]], i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK11-NEXT: [[TMP60:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK11-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP60]], 1 -// CHECK11-NEXT: [[TMP61:%.*]] = zext i32 [[ADD14]] to i64 -// CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP61]]) -// CHECK11-NEXT: [[TMP62:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l98.region_id, i32 3, i8** [[TMP55]], i8** [[TMP56]], i64* [[TMP57]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.2, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) -// CHECK11-NEXT: [[TMP63:%.*]] = icmp ne i32 [[TMP62]], 0 -// CHECK11-NEXT: br i1 [[TMP63]], label [[OMP_OFFLOAD_FAILED15:%.*]], label [[OMP_OFFLOAD_CONT16:%.*]] -// CHECK11: omp_offload.failed15: -// CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l98(i32 [[TMP34]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR4]] -// CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT16]] -// CHECK11: omp_offload.cont16: -// CHECK11-NEXT: [[TMP64:%.*]] = load i32, i32* [[N]], align 4 -// CHECK11-NEXT: store i32 [[TMP64]], i32* [[DOTCAPTURE_EXPR_17]], align 4 -// CHECK11-NEXT: [[TMP65:%.*]] = load i32, i32* [[N]], align 4 -// CHECK11-NEXT: store i32 [[TMP65]], i32* [[N_CASTED18]], align 4 -// CHECK11-NEXT: [[TMP66:%.*]] = load i32, i32* [[N_CASTED18]], align 4 -// CHECK11-NEXT: [[TMP67:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_17]], align 4 -// CHECK11-NEXT: store i32 [[TMP67]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK11-NEXT: [[TMP68:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK11-NEXT: [[TMP69:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK11-NEXT: [[TMP70:%.*]] = sext i32 [[TMP69]] to i64 -// CHECK11-NEXT: [[TMP71:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP72:%.*]] = bitcast i8** [[TMP71]] to i32* -// CHECK11-NEXT: store i32 [[TMP66]], i32* [[TMP72]], align 4 -// CHECK11-NEXT: [[TMP73:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP74:%.*]] = bitcast i8** [[TMP73]] to i32* -// CHECK11-NEXT: store i32 [[TMP66]], i32* [[TMP74]], align 4 -// CHECK11-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* 
[[DOTOFFLOAD_SIZES22]], i32 0, i32 0 -// CHECK11-NEXT: store i64 4, i64* [[TMP75]], align 4 -// CHECK11-NEXT: [[TMP76:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 0 -// CHECK11-NEXT: store i8* null, i8** [[TMP76]], align 4 -// CHECK11-NEXT: [[TMP77:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 1 -// CHECK11-NEXT: [[TMP78:%.*]] = bitcast i8** [[TMP77]] to i32* -// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP78]], align 4 -// CHECK11-NEXT: [[TMP79:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 1 -// CHECK11-NEXT: [[TMP80:%.*]] = bitcast i8** [[TMP79]] to i32* -// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP80]], align 4 -// CHECK11-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 1 -// CHECK11-NEXT: store i64 4, i64* [[TMP81]], align 4 -// CHECK11-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 1 +// CHECK11-NEXT: [[TMP43:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2 +// CHECK11-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32** +// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP44]], align 4 +// CHECK11-NEXT: [[TMP45:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 2 +// CHECK11-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32** +// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP46]], align 4 +// CHECK11-NEXT: store i64 [[TMP32]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.2, i32 0, i32 2), align 4 +// CHECK11-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 2 +// CHECK11-NEXT: store i8* null, i8** [[TMP47]], align 4 +// CHECK11-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP49:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP50:%.*]] = load i32, i32* [[N]], align 4 +// CHECK11-NEXT: store i32 [[TMP50]], i32* [[DOTCAPTURE_EXPR_8]], align 4 +// CHECK11-NEXT: [[TMP51:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_8]], align 4 +// CHECK11-NEXT: [[SUB10:%.*]] = sub nsw i32 [[TMP51]], 0 +// CHECK11-NEXT: [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1 +// CHECK11-NEXT: [[SUB12:%.*]] = sub nsw i32 [[DIV11]], 1 +// CHECK11-NEXT: store i32 [[SUB12]], i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK11-NEXT: [[TMP52:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK11-NEXT: [[ADD13:%.*]] = add nsw i32 [[TMP52]], 1 +// CHECK11-NEXT: [[TMP53:%.*]] = zext i32 [[ADD13]] to i64 +// CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP53]]) +// CHECK11-NEXT: [[TMP54:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l98.region_id, i32 3, i8** [[TMP48]], i8** [[TMP49]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.2, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.3, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK11-NEXT: [[TMP55:%.*]] = icmp ne i32 [[TMP54]], 0 +// CHECK11-NEXT: br i1 [[TMP55]], label [[OMP_OFFLOAD_FAILED14:%.*]], label [[OMP_OFFLOAD_CONT15:%.*]] +// CHECK11: omp_offload.failed14: +// CHECK11-NEXT: call void 
@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l98(i32 [[TMP30]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR4]] +// CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT15]] +// CHECK11: omp_offload.cont15: +// CHECK11-NEXT: [[TMP56:%.*]] = load i32, i32* [[N]], align 4 +// CHECK11-NEXT: store i32 [[TMP56]], i32* [[DOTCAPTURE_EXPR_16]], align 4 +// CHECK11-NEXT: [[TMP57:%.*]] = load i32, i32* [[N]], align 4 +// CHECK11-NEXT: store i32 [[TMP57]], i32* [[N_CASTED17]], align 4 +// CHECK11-NEXT: [[TMP58:%.*]] = load i32, i32* [[N_CASTED17]], align 4 +// CHECK11-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_16]], align 4 +// CHECK11-NEXT: store i32 [[TMP59]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 +// CHECK11-NEXT: [[TMP60:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 +// CHECK11-NEXT: [[TMP61:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK11-NEXT: [[TMP62:%.*]] = sext i32 [[TMP61]] to i64 +// CHECK11-NEXT: [[TMP63:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP64:%.*]] = bitcast i8** [[TMP63]] to i32* +// CHECK11-NEXT: store i32 [[TMP58]], i32* [[TMP64]], align 4 +// CHECK11-NEXT: [[TMP65:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP66:%.*]] = bitcast i8** [[TMP65]] to i32* +// CHECK11-NEXT: store i32 [[TMP58]], i32* [[TMP66]], align 4 +// CHECK11-NEXT: [[TMP67:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 0 +// CHECK11-NEXT: store i8* null, i8** [[TMP67]], align 4 +// CHECK11-NEXT: [[TMP68:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 1 +// CHECK11-NEXT: [[TMP69:%.*]] = bitcast i8** [[TMP68]] to i32* +// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP69]], align 4 +// CHECK11-NEXT: [[TMP70:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 1 +// CHECK11-NEXT: [[TMP71:%.*]] = bitcast i8** [[TMP70]] to i32* +// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP71]], align 4 +// CHECK11-NEXT: [[TMP72:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 1 +// CHECK11-NEXT: store i8* null, i8** [[TMP72]], align 4 +// CHECK11-NEXT: [[TMP73:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 2 +// CHECK11-NEXT: [[TMP74:%.*]] = bitcast i8** [[TMP73]] to i32** +// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP74]], align 4 +// CHECK11-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 2 +// CHECK11-NEXT: [[TMP76:%.*]] = bitcast i8** [[TMP75]] to i32** +// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP76]], align 4 +// CHECK11-NEXT: store i64 [[TMP62]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.5, i32 0, i32 2), align 4 +// CHECK11-NEXT: [[TMP77:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 2 +// CHECK11-NEXT: store i8* null, i8** [[TMP77]], align 4 +// CHECK11-NEXT: [[TMP78:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 3 +// CHECK11-NEXT: [[TMP79:%.*]] = bitcast i8** [[TMP78]] to i32* +// CHECK11-NEXT: store i32 [[TMP60]], i32* [[TMP79]], align 4 +// CHECK11-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 3 +// CHECK11-NEXT: [[TMP81:%.*]] = bitcast i8** [[TMP80]] to i32* +// CHECK11-NEXT: store i32 [[TMP60]], i32* [[TMP81]], align 4 +// CHECK11-NEXT: [[TMP82:%.*]] = 
getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 3 // CHECK11-NEXT: store i8* null, i8** [[TMP82]], align 4 -// CHECK11-NEXT: [[TMP83:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 2 -// CHECK11-NEXT: [[TMP84:%.*]] = bitcast i8** [[TMP83]] to i32** -// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP84]], align 4 -// CHECK11-NEXT: [[TMP85:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 2 -// CHECK11-NEXT: [[TMP86:%.*]] = bitcast i8** [[TMP85]] to i32** -// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP86]], align 4 -// CHECK11-NEXT: [[TMP87:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 2 -// CHECK11-NEXT: store i64 [[TMP70]], i64* [[TMP87]], align 4 -// CHECK11-NEXT: [[TMP88:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 2 -// CHECK11-NEXT: store i8* null, i8** [[TMP88]], align 4 -// CHECK11-NEXT: [[TMP89:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 3 -// CHECK11-NEXT: [[TMP90:%.*]] = bitcast i8** [[TMP89]] to i32* -// CHECK11-NEXT: store i32 [[TMP68]], i32* [[TMP90]], align 4 -// CHECK11-NEXT: [[TMP91:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 3 -// CHECK11-NEXT: [[TMP92:%.*]] = bitcast i8** [[TMP91]] to i32* -// CHECK11-NEXT: store i32 [[TMP68]], i32* [[TMP92]], align 4 -// CHECK11-NEXT: [[TMP93:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 3 -// CHECK11-NEXT: store i64 4, i64* [[TMP93]], align 4 -// CHECK11-NEXT: [[TMP94:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 3 -// CHECK11-NEXT: store i8* null, i8** [[TMP94]], align 4 -// CHECK11-NEXT: [[TMP95:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP96:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP97:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP98:%.*]] = load i32, i32* [[N]], align 4 -// CHECK11-NEXT: store i32 [[TMP98]], i32* [[DOTCAPTURE_EXPR_24]], align 4 -// CHECK11-NEXT: [[TMP99:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_24]], align 4 -// CHECK11-NEXT: [[SUB26:%.*]] = sub nsw i32 [[TMP99]], 0 -// CHECK11-NEXT: [[DIV27:%.*]] = sdiv i32 [[SUB26]], 1 -// CHECK11-NEXT: [[SUB28:%.*]] = sub nsw i32 [[DIV27]], 1 -// CHECK11-NEXT: store i32 [[SUB28]], i32* [[DOTCAPTURE_EXPR_25]], align 4 -// CHECK11-NEXT: [[TMP100:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_25]], align 4 -// CHECK11-NEXT: [[ADD29:%.*]] = add nsw i32 [[TMP100]], 1 -// CHECK11-NEXT: [[TMP101:%.*]] = zext i32 [[ADD29]] to i64 -// CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP101]]) -// CHECK11-NEXT: [[TMP102:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.region_id, i32 4, i8** [[TMP95]], i8** [[TMP96]], i64* [[TMP97]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) -// CHECK11-NEXT: [[TMP103:%.*]] = icmp ne i32 [[TMP102]], 0 -// CHECK11-NEXT: br i1 [[TMP103]], label [[OMP_OFFLOAD_FAILED30:%.*]], label [[OMP_OFFLOAD_CONT31:%.*]] -// CHECK11: omp_offload.failed30: -// CHECK11-NEXT: call void 
@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102(i32 [[TMP66]], i32 [[TMP0]], i32* [[VLA]], i32 [[TMP68]]) #[[ATTR4]] -// CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT31]] -// CHECK11: omp_offload.cont31: -// CHECK11-NEXT: [[TMP104:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 -// CHECK11-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP104]]) +// CHECK11-NEXT: [[TMP83:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP84:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP85:%.*]] = load i32, i32* [[N]], align 4 +// CHECK11-NEXT: store i32 [[TMP85]], i32* [[DOTCAPTURE_EXPR_22]], align 4 +// CHECK11-NEXT: [[TMP86:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_22]], align 4 +// CHECK11-NEXT: [[SUB24:%.*]] = sub nsw i32 [[TMP86]], 0 +// CHECK11-NEXT: [[DIV25:%.*]] = sdiv i32 [[SUB24]], 1 +// CHECK11-NEXT: [[SUB26:%.*]] = sub nsw i32 [[DIV25]], 1 +// CHECK11-NEXT: store i32 [[SUB26]], i32* [[DOTCAPTURE_EXPR_23]], align 4 +// CHECK11-NEXT: [[TMP87:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_23]], align 4 +// CHECK11-NEXT: [[ADD27:%.*]] = add nsw i32 [[TMP87]], 1 +// CHECK11-NEXT: [[TMP88:%.*]] = zext i32 [[ADD27]] to i64 +// CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP88]]) +// CHECK11-NEXT: [[TMP89:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.region_id, i32 4, i8** [[TMP83]], i8** [[TMP84]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.5, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.6, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK11-NEXT: [[TMP90:%.*]] = icmp ne i32 [[TMP89]], 0 +// CHECK11-NEXT: br i1 [[TMP90]], label [[OMP_OFFLOAD_FAILED28:%.*]], label [[OMP_OFFLOAD_CONT29:%.*]] +// CHECK11: omp_offload.failed28: +// CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102(i32 [[TMP58]], i32 [[TMP0]], i32* [[VLA]], i32 [[TMP60]]) #[[ATTR4]] +// CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT29]] +// CHECK11: omp_offload.cont29: +// CHECK11-NEXT: [[TMP91:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 +// CHECK11-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP91]]) // CHECK11-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK11-NEXT: [[TMP105:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK11-NEXT: call void @llvm.stackrestore(i8* [[TMP105]]) -// CHECK11-NEXT: [[TMP106:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK11-NEXT: ret i32 [[TMP106]] +// CHECK11-NEXT: [[TMP92:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK11-NEXT: call void @llvm.stackrestore(i8* [[TMP92]]) +// CHECK11-NEXT: [[TMP93:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK11-NEXT: ret i32 [[TMP93]] // // // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l94 @@ -4591,11 +4522,11 @@ // CHECK11-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK11-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK11-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP5]]) +// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP5]]) // CHECK11-NEXT: ret void // // -// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..3 +// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..4 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] { // CHECK11-NEXT: entry: // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -4749,7 +4680,7 @@ // CHECK11-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK11-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK11-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l76.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.6, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK11-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l76.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) // CHECK11-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK11-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK11: omp_offload.failed: @@ -4767,7 +4698,7 @@ // CHECK11-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0 // CHECK11-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0 // CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK11-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l80.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK11-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l80.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.11, i32 0, 
i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) // CHECK11-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 // CHECK11-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]] // CHECK11: omp_offload.failed5: @@ -4785,7 +4716,7 @@ // CHECK11-NEXT: [[TMP23:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0 // CHECK11-NEXT: [[TMP24:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0 // CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK11-NEXT: [[TMP25:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l84.region_id, i32 1, i8** [[TMP23]], i8** [[TMP24]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.11, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK11-NEXT: [[TMP25:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l84.region_id, i32 1, i8** [[TMP23]], i8** [[TMP24]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.14, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.15, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) // CHECK11-NEXT: [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0 // CHECK11-NEXT: br i1 [[TMP26]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]] // CHECK11: omp_offload.failed11: @@ -4801,11 +4732,11 @@ // CHECK11-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK11-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 // CHECK11-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 -// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..5 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..7 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK11-NEXT: ret void // // -// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..5 +// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..7 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR3]] { // CHECK11-NEXT: entry: // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -4884,11 +4815,11 @@ // CHECK11-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK11-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 // CHECK11-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 -// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..7 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK11-NEXT: ret void // // -// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..7 +// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..10 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR3]] { // CHECK11-NEXT: entry: // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -4967,11 +4898,11 @@ // CHECK11-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK11-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 // CHECK11-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 -// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..13 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK11-NEXT: ret void // // -// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..10 +// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..13 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR3]] { // CHECK11-NEXT: entry: // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -5081,7 +5012,6 @@ // CHECK12-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK12-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK12-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4 -// CHECK12-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 4 // CHECK12-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK12-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK12-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -5089,20 +5019,18 @@ // CHECK12-NEXT: [[DOTOFFLOAD_BASEPTRS4:%.*]] = alloca [3 x i8*], align 4 // CHECK12-NEXT: [[DOTOFFLOAD_PTRS5:%.*]] = alloca [3 x i8*], align 4 // CHECK12-NEXT: [[DOTOFFLOAD_MAPPERS6:%.*]] = alloca [3 x i8*], align 4 -// CHECK12-NEXT: [[DOTOFFLOAD_SIZES7:%.*]] = alloca [3 x i64], align 4 -// CHECK12-NEXT: [[_TMP8:%.*]] = alloca i32, align 4 +// CHECK12-NEXT: [[_TMP7:%.*]] = alloca i32, align 4 +// CHECK12-NEXT: [[DOTCAPTURE_EXPR_8:%.*]] = alloca i32, align 4 // CHECK12-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4 -// CHECK12-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4 -// CHECK12-NEXT: [[DOTCAPTURE_EXPR_17:%.*]] = alloca i32, align 4 -// CHECK12-NEXT: [[N_CASTED18:%.*]] = alloca i32, align 4 +// CHECK12-NEXT: [[DOTCAPTURE_EXPR_16:%.*]] = alloca i32, align 4 +// CHECK12-NEXT: [[N_CASTED17:%.*]] = alloca i32, align 4 // CHECK12-NEXT: [[DOTCAPTURE_EXPR__CASTED:%.*]] = alloca i32, align 4 -// CHECK12-NEXT: 
[[DOTOFFLOAD_BASEPTRS19:%.*]] = alloca [4 x i8*], align 4 -// CHECK12-NEXT: [[DOTOFFLOAD_PTRS20:%.*]] = alloca [4 x i8*], align 4 -// CHECK12-NEXT: [[DOTOFFLOAD_MAPPERS21:%.*]] = alloca [4 x i8*], align 4 -// CHECK12-NEXT: [[DOTOFFLOAD_SIZES22:%.*]] = alloca [4 x i64], align 4 -// CHECK12-NEXT: [[_TMP23:%.*]] = alloca i32, align 4 -// CHECK12-NEXT: [[DOTCAPTURE_EXPR_24:%.*]] = alloca i32, align 4 -// CHECK12-NEXT: [[DOTCAPTURE_EXPR_25:%.*]] = alloca i32, align 4 +// CHECK12-NEXT: [[DOTOFFLOAD_BASEPTRS18:%.*]] = alloca [4 x i8*], align 4 +// CHECK12-NEXT: [[DOTOFFLOAD_PTRS19:%.*]] = alloca [4 x i8*], align 4 +// CHECK12-NEXT: [[DOTOFFLOAD_MAPPERS20:%.*]] = alloca [4 x i8*], align 4 +// CHECK12-NEXT: [[_TMP21:%.*]] = alloca i32, align 4 +// CHECK12-NEXT: [[DOTCAPTURE_EXPR_22:%.*]] = alloca i32, align 4 +// CHECK12-NEXT: [[DOTCAPTURE_EXPR_23:%.*]] = alloca i32, align 4 // CHECK12-NEXT: store i32 0, i32* [[RETVAL]], align 4 // CHECK12-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 // CHECK12-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 4 @@ -5123,185 +5051,165 @@ // CHECK12-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK12-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i32* // CHECK12-NEXT: store i32 [[TMP3]], i32* [[TMP9]], align 4 -// CHECK12-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK12-NEXT: store i64 4, i64* [[TMP10]], align 4 -// CHECK12-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK12-NEXT: store i8* null, i8** [[TMP11]], align 4 -// CHECK12-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK12-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32* -// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP13]], align 4 -// CHECK12-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK12-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32* -// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP15]], align 4 -// CHECK12-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK12-NEXT: store i64 4, i64* [[TMP16]], align 4 -// CHECK12-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK12-NEXT: store i8* null, i8** [[TMP17]], align 4 -// CHECK12-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK12-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK12-NEXT: store i8* null, i8** [[TMP10]], align 4 +// CHECK12-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK12-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i32* +// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP12]], align 4 +// CHECK12-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK12-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i32* +// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP14]], align 4 +// CHECK12-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK12-NEXT: store i8* null, i8** [[TMP15]], align 4 +// CHECK12-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, 
i32 2 +// CHECK12-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** +// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 4 +// CHECK12-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK12-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** // CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 4 -// CHECK12-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK12-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** -// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 4 -// CHECK12-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK12-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 4 -// CHECK12-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK12-NEXT: store i8* null, i8** [[TMP23]], align 4 -// CHECK12-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4 -// CHECK12-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK12-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK12-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 +// CHECK12-NEXT: store i64 [[TMP5]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 2), align 4 +// CHECK12-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK12-NEXT: store i8* null, i8** [[TMP20]], align 4 +// CHECK12-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP23:%.*]] = load i32, i32* [[N]], align 4 +// CHECK12-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK12-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK12-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK12-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK12-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK12-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK12-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK12-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1 -// CHECK12-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 -// CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[TMP30]]) -// CHECK12-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l94.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) -// CHECK12-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 -// CHECK12-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK12-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK12-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], 1 +// CHECK12-NEXT: 
[[TMP26:%.*]] = zext i32 [[ADD]] to i64 +// CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[TMP26]]) +// CHECK12-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l94.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK12-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK12-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK12: omp_offload.failed: // CHECK12-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l94(i32 [[TMP3]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR4:[0-9]+]] // CHECK12-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK12: omp_offload.cont: -// CHECK12-NEXT: [[TMP33:%.*]] = load i32, i32* [[N]], align 4 -// CHECK12-NEXT: store i32 [[TMP33]], i32* [[N_CASTED3]], align 4 -// CHECK12-NEXT: [[TMP34:%.*]] = load i32, i32* [[N_CASTED3]], align 4 -// CHECK12-NEXT: [[TMP35:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK12-NEXT: [[TMP36:%.*]] = sext i32 [[TMP35]] to i64 -// CHECK12-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i32* -// CHECK12-NEXT: store i32 [[TMP34]], i32* [[TMP38]], align 4 -// CHECK12-NEXT: [[TMP39:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i32* -// CHECK12-NEXT: store i32 [[TMP34]], i32* [[TMP40]], align 4 -// CHECK12-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 0 -// CHECK12-NEXT: store i64 4, i64* [[TMP41]], align 4 -// CHECK12-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP29:%.*]] = load i32, i32* [[N]], align 4 +// CHECK12-NEXT: store i32 [[TMP29]], i32* [[N_CASTED3]], align 4 +// CHECK12-NEXT: [[TMP30:%.*]] = load i32, i32* [[N_CASTED3]], align 4 +// CHECK12-NEXT: [[TMP31:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK12-NEXT: [[TMP32:%.*]] = sext i32 [[TMP31]] to i64 +// CHECK12-NEXT: [[TMP33:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i32* +// CHECK12-NEXT: store i32 [[TMP30]], i32* [[TMP34]], align 4 +// CHECK12-NEXT: [[TMP35:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP36:%.*]] = bitcast i8** [[TMP35]] to i32* +// CHECK12-NEXT: store i32 [[TMP30]], i32* [[TMP36]], align 4 +// CHECK12-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0 +// CHECK12-NEXT: store i8* null, i8** [[TMP37]], align 4 +// CHECK12-NEXT: [[TMP38:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1 +// CHECK12-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i32* +// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP39]], align 4 +// CHECK12-NEXT: [[TMP40:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 1 +// CHECK12-NEXT: [[TMP41:%.*]] = bitcast i8** [[TMP40]] to i32* +// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP41]], align 4 
+// CHECK12-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1 // CHECK12-NEXT: store i8* null, i8** [[TMP42]], align 4 -// CHECK12-NEXT: [[TMP43:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1 -// CHECK12-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32* -// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP44]], align 4 -// CHECK12-NEXT: [[TMP45:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 1 -// CHECK12-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32* -// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP46]], align 4 -// CHECK12-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 1 -// CHECK12-NEXT: store i64 4, i64* [[TMP47]], align 4 -// CHECK12-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1 -// CHECK12-NEXT: store i8* null, i8** [[TMP48]], align 4 -// CHECK12-NEXT: [[TMP49:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2 -// CHECK12-NEXT: [[TMP50:%.*]] = bitcast i8** [[TMP49]] to i32** -// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP50]], align 4 -// CHECK12-NEXT: [[TMP51:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 2 -// CHECK12-NEXT: [[TMP52:%.*]] = bitcast i8** [[TMP51]] to i32** -// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP52]], align 4 -// CHECK12-NEXT: [[TMP53:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 2 -// CHECK12-NEXT: store i64 [[TMP36]], i64* [[TMP53]], align 4 -// CHECK12-NEXT: [[TMP54:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 2 -// CHECK12-NEXT: store i8* null, i8** [[TMP54]], align 4 -// CHECK12-NEXT: [[TMP55:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP56:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP57:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP58:%.*]] = load i32, i32* [[N]], align 4 -// CHECK12-NEXT: store i32 [[TMP58]], i32* [[DOTCAPTURE_EXPR_9]], align 4 -// CHECK12-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 -// CHECK12-NEXT: [[SUB11:%.*]] = sub nsw i32 [[TMP59]], 0 -// CHECK12-NEXT: [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1 -// CHECK12-NEXT: [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1 -// CHECK12-NEXT: store i32 [[SUB13]], i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK12-NEXT: [[TMP60:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK12-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP60]], 1 -// CHECK12-NEXT: [[TMP61:%.*]] = zext i32 [[ADD14]] to i64 -// CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP61]]) -// CHECK12-NEXT: [[TMP62:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l98.region_id, i32 3, i8** [[TMP55]], i8** [[TMP56]], i64* [[TMP57]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.2, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) -// CHECK12-NEXT: [[TMP63:%.*]] = icmp ne i32 [[TMP62]], 0 -// CHECK12-NEXT: br i1 [[TMP63]], label [[OMP_OFFLOAD_FAILED15:%.*]], label [[OMP_OFFLOAD_CONT16:%.*]] -// CHECK12: omp_offload.failed15: -// CHECK12-NEXT: call 
void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l98(i32 [[TMP34]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR4]] -// CHECK12-NEXT: br label [[OMP_OFFLOAD_CONT16]] -// CHECK12: omp_offload.cont16: -// CHECK12-NEXT: [[TMP64:%.*]] = load i32, i32* [[N]], align 4 -// CHECK12-NEXT: store i32 [[TMP64]], i32* [[DOTCAPTURE_EXPR_17]], align 4 -// CHECK12-NEXT: [[TMP65:%.*]] = load i32, i32* [[N]], align 4 -// CHECK12-NEXT: store i32 [[TMP65]], i32* [[N_CASTED18]], align 4 -// CHECK12-NEXT: [[TMP66:%.*]] = load i32, i32* [[N_CASTED18]], align 4 -// CHECK12-NEXT: [[TMP67:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_17]], align 4 -// CHECK12-NEXT: store i32 [[TMP67]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK12-NEXT: [[TMP68:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK12-NEXT: [[TMP69:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK12-NEXT: [[TMP70:%.*]] = sext i32 [[TMP69]] to i64 -// CHECK12-NEXT: [[TMP71:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP72:%.*]] = bitcast i8** [[TMP71]] to i32* -// CHECK12-NEXT: store i32 [[TMP66]], i32* [[TMP72]], align 4 -// CHECK12-NEXT: [[TMP73:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP74:%.*]] = bitcast i8** [[TMP73]] to i32* -// CHECK12-NEXT: store i32 [[TMP66]], i32* [[TMP74]], align 4 -// CHECK12-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 0 -// CHECK12-NEXT: store i64 4, i64* [[TMP75]], align 4 -// CHECK12-NEXT: [[TMP76:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 0 -// CHECK12-NEXT: store i8* null, i8** [[TMP76]], align 4 -// CHECK12-NEXT: [[TMP77:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 1 -// CHECK12-NEXT: [[TMP78:%.*]] = bitcast i8** [[TMP77]] to i32* -// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP78]], align 4 -// CHECK12-NEXT: [[TMP79:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 1 -// CHECK12-NEXT: [[TMP80:%.*]] = bitcast i8** [[TMP79]] to i32* -// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP80]], align 4 -// CHECK12-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 1 -// CHECK12-NEXT: store i64 4, i64* [[TMP81]], align 4 -// CHECK12-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 1 +// CHECK12-NEXT: [[TMP43:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2 +// CHECK12-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32** +// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP44]], align 4 +// CHECK12-NEXT: [[TMP45:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 2 +// CHECK12-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32** +// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP46]], align 4 +// CHECK12-NEXT: store i64 [[TMP32]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.2, i32 0, i32 2), align 4 +// CHECK12-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 2 +// CHECK12-NEXT: store i8* null, i8** [[TMP47]], align 4 +// CHECK12-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP49:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 +// 
CHECK12-NEXT: [[TMP50:%.*]] = load i32, i32* [[N]], align 4 +// CHECK12-NEXT: store i32 [[TMP50]], i32* [[DOTCAPTURE_EXPR_8]], align 4 +// CHECK12-NEXT: [[TMP51:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_8]], align 4 +// CHECK12-NEXT: [[SUB10:%.*]] = sub nsw i32 [[TMP51]], 0 +// CHECK12-NEXT: [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1 +// CHECK12-NEXT: [[SUB12:%.*]] = sub nsw i32 [[DIV11]], 1 +// CHECK12-NEXT: store i32 [[SUB12]], i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK12-NEXT: [[TMP52:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK12-NEXT: [[ADD13:%.*]] = add nsw i32 [[TMP52]], 1 +// CHECK12-NEXT: [[TMP53:%.*]] = zext i32 [[ADD13]] to i64 +// CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP53]]) +// CHECK12-NEXT: [[TMP54:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l98.region_id, i32 3, i8** [[TMP48]], i8** [[TMP49]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.2, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.3, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK12-NEXT: [[TMP55:%.*]] = icmp ne i32 [[TMP54]], 0 +// CHECK12-NEXT: br i1 [[TMP55]], label [[OMP_OFFLOAD_FAILED14:%.*]], label [[OMP_OFFLOAD_CONT15:%.*]] +// CHECK12: omp_offload.failed14: +// CHECK12-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l98(i32 [[TMP30]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR4]] +// CHECK12-NEXT: br label [[OMP_OFFLOAD_CONT15]] +// CHECK12: omp_offload.cont15: +// CHECK12-NEXT: [[TMP56:%.*]] = load i32, i32* [[N]], align 4 +// CHECK12-NEXT: store i32 [[TMP56]], i32* [[DOTCAPTURE_EXPR_16]], align 4 +// CHECK12-NEXT: [[TMP57:%.*]] = load i32, i32* [[N]], align 4 +// CHECK12-NEXT: store i32 [[TMP57]], i32* [[N_CASTED17]], align 4 +// CHECK12-NEXT: [[TMP58:%.*]] = load i32, i32* [[N_CASTED17]], align 4 +// CHECK12-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_16]], align 4 +// CHECK12-NEXT: store i32 [[TMP59]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 +// CHECK12-NEXT: [[TMP60:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 +// CHECK12-NEXT: [[TMP61:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK12-NEXT: [[TMP62:%.*]] = sext i32 [[TMP61]] to i64 +// CHECK12-NEXT: [[TMP63:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP64:%.*]] = bitcast i8** [[TMP63]] to i32* +// CHECK12-NEXT: store i32 [[TMP58]], i32* [[TMP64]], align 4 +// CHECK12-NEXT: [[TMP65:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP66:%.*]] = bitcast i8** [[TMP65]] to i32* +// CHECK12-NEXT: store i32 [[TMP58]], i32* [[TMP66]], align 4 +// CHECK12-NEXT: [[TMP67:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 0 +// CHECK12-NEXT: store i8* null, i8** [[TMP67]], align 4 +// CHECK12-NEXT: [[TMP68:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 1 +// CHECK12-NEXT: [[TMP69:%.*]] = bitcast i8** [[TMP68]] to i32* +// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP69]], align 4 +// CHECK12-NEXT: [[TMP70:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 1 +// CHECK12-NEXT: [[TMP71:%.*]] = bitcast i8** [[TMP70]] to i32* +// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP71]], align 4 +// CHECK12-NEXT: [[TMP72:%.*]] = getelementptr inbounds [4 x i8*], [4 x 
i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 1 +// CHECK12-NEXT: store i8* null, i8** [[TMP72]], align 4 +// CHECK12-NEXT: [[TMP73:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 2 +// CHECK12-NEXT: [[TMP74:%.*]] = bitcast i8** [[TMP73]] to i32** +// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP74]], align 4 +// CHECK12-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 2 +// CHECK12-NEXT: [[TMP76:%.*]] = bitcast i8** [[TMP75]] to i32** +// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP76]], align 4 +// CHECK12-NEXT: store i64 [[TMP62]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.5, i32 0, i32 2), align 4 +// CHECK12-NEXT: [[TMP77:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 2 +// CHECK12-NEXT: store i8* null, i8** [[TMP77]], align 4 +// CHECK12-NEXT: [[TMP78:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 3 +// CHECK12-NEXT: [[TMP79:%.*]] = bitcast i8** [[TMP78]] to i32* +// CHECK12-NEXT: store i32 [[TMP60]], i32* [[TMP79]], align 4 +// CHECK12-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 3 +// CHECK12-NEXT: [[TMP81:%.*]] = bitcast i8** [[TMP80]] to i32* +// CHECK12-NEXT: store i32 [[TMP60]], i32* [[TMP81]], align 4 +// CHECK12-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 3 // CHECK12-NEXT: store i8* null, i8** [[TMP82]], align 4 -// CHECK12-NEXT: [[TMP83:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 2 -// CHECK12-NEXT: [[TMP84:%.*]] = bitcast i8** [[TMP83]] to i32** -// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP84]], align 4 -// CHECK12-NEXT: [[TMP85:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 2 -// CHECK12-NEXT: [[TMP86:%.*]] = bitcast i8** [[TMP85]] to i32** -// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP86]], align 4 -// CHECK12-NEXT: [[TMP87:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 2 -// CHECK12-NEXT: store i64 [[TMP70]], i64* [[TMP87]], align 4 -// CHECK12-NEXT: [[TMP88:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 2 -// CHECK12-NEXT: store i8* null, i8** [[TMP88]], align 4 -// CHECK12-NEXT: [[TMP89:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 3 -// CHECK12-NEXT: [[TMP90:%.*]] = bitcast i8** [[TMP89]] to i32* -// CHECK12-NEXT: store i32 [[TMP68]], i32* [[TMP90]], align 4 -// CHECK12-NEXT: [[TMP91:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 3 -// CHECK12-NEXT: [[TMP92:%.*]] = bitcast i8** [[TMP91]] to i32* -// CHECK12-NEXT: store i32 [[TMP68]], i32* [[TMP92]], align 4 -// CHECK12-NEXT: [[TMP93:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 3 -// CHECK12-NEXT: store i64 4, i64* [[TMP93]], align 4 -// CHECK12-NEXT: [[TMP94:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i32 0, i32 3 -// CHECK12-NEXT: store i8* null, i8** [[TMP94]], align 4 -// CHECK12-NEXT: [[TMP95:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP96:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP97:%.*]] = getelementptr inbounds [4 x i64], 
[4 x i64]* [[DOTOFFLOAD_SIZES22]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP98:%.*]] = load i32, i32* [[N]], align 4 -// CHECK12-NEXT: store i32 [[TMP98]], i32* [[DOTCAPTURE_EXPR_24]], align 4 -// CHECK12-NEXT: [[TMP99:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_24]], align 4 -// CHECK12-NEXT: [[SUB26:%.*]] = sub nsw i32 [[TMP99]], 0 -// CHECK12-NEXT: [[DIV27:%.*]] = sdiv i32 [[SUB26]], 1 -// CHECK12-NEXT: [[SUB28:%.*]] = sub nsw i32 [[DIV27]], 1 -// CHECK12-NEXT: store i32 [[SUB28]], i32* [[DOTCAPTURE_EXPR_25]], align 4 -// CHECK12-NEXT: [[TMP100:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_25]], align 4 -// CHECK12-NEXT: [[ADD29:%.*]] = add nsw i32 [[TMP100]], 1 -// CHECK12-NEXT: [[TMP101:%.*]] = zext i32 [[ADD29]] to i64 -// CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP101]]) -// CHECK12-NEXT: [[TMP102:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.region_id, i32 4, i8** [[TMP95]], i8** [[TMP96]], i64* [[TMP97]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) -// CHECK12-NEXT: [[TMP103:%.*]] = icmp ne i32 [[TMP102]], 0 -// CHECK12-NEXT: br i1 [[TMP103]], label [[OMP_OFFLOAD_FAILED30:%.*]], label [[OMP_OFFLOAD_CONT31:%.*]] -// CHECK12: omp_offload.failed30: -// CHECK12-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102(i32 [[TMP66]], i32 [[TMP0]], i32* [[VLA]], i32 [[TMP68]]) #[[ATTR4]] -// CHECK12-NEXT: br label [[OMP_OFFLOAD_CONT31]] -// CHECK12: omp_offload.cont31: -// CHECK12-NEXT: [[TMP104:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 -// CHECK12-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP104]]) +// CHECK12-NEXT: [[TMP83:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP84:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP85:%.*]] = load i32, i32* [[N]], align 4 +// CHECK12-NEXT: store i32 [[TMP85]], i32* [[DOTCAPTURE_EXPR_22]], align 4 +// CHECK12-NEXT: [[TMP86:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_22]], align 4 +// CHECK12-NEXT: [[SUB24:%.*]] = sub nsw i32 [[TMP86]], 0 +// CHECK12-NEXT: [[DIV25:%.*]] = sdiv i32 [[SUB24]], 1 +// CHECK12-NEXT: [[SUB26:%.*]] = sub nsw i32 [[DIV25]], 1 +// CHECK12-NEXT: store i32 [[SUB26]], i32* [[DOTCAPTURE_EXPR_23]], align 4 +// CHECK12-NEXT: [[TMP87:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_23]], align 4 +// CHECK12-NEXT: [[ADD27:%.*]] = add nsw i32 [[TMP87]], 1 +// CHECK12-NEXT: [[TMP88:%.*]] = zext i32 [[ADD27]] to i64 +// CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP88]]) +// CHECK12-NEXT: [[TMP89:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102.region_id, i32 4, i8** [[TMP83]], i8** [[TMP84]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.5, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.6, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK12-NEXT: [[TMP90:%.*]] = icmp ne i32 [[TMP89]], 0 +// CHECK12-NEXT: br i1 [[TMP90]], label [[OMP_OFFLOAD_FAILED28:%.*]], label [[OMP_OFFLOAD_CONT29:%.*]] +// CHECK12: omp_offload.failed28: +// CHECK12-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l102(i32 [[TMP58]], i32 [[TMP0]], i32* 
[[VLA]], i32 [[TMP60]]) #[[ATTR4]] +// CHECK12-NEXT: br label [[OMP_OFFLOAD_CONT29]] +// CHECK12: omp_offload.cont29: +// CHECK12-NEXT: [[TMP91:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 +// CHECK12-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP91]]) // CHECK12-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK12-NEXT: [[TMP105:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK12-NEXT: call void @llvm.stackrestore(i8* [[TMP105]]) -// CHECK12-NEXT: [[TMP106:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK12-NEXT: ret i32 [[TMP106]] +// CHECK12-NEXT: [[TMP92:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK12-NEXT: call void @llvm.stackrestore(i8* [[TMP92]]) +// CHECK12-NEXT: [[TMP93:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK12-NEXT: ret i32 [[TMP93]] // // // CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l94 @@ -5573,11 +5481,11 @@ // CHECK12-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK12-NEXT: store i32 [[TMP4]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK12-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP5]]) +// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i32 [[TMP3]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP5]]) // CHECK12-NEXT: ret void // // -// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..3 +// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..4 // CHECK12-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR3]] { // CHECK12-NEXT: entry: // CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -5731,7 +5639,7 @@ // CHECK12-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK12-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK12-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l76.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.6, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK12-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l76.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) // CHECK12-NEXT: 
[[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK12-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK12: omp_offload.failed: @@ -5749,7 +5657,7 @@ // CHECK12-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0 // CHECK12-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0 // CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK12-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l80.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK12-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l80.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.11, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) // CHECK12-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 // CHECK12-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]] // CHECK12: omp_offload.failed5: @@ -5767,7 +5675,7 @@ // CHECK12-NEXT: [[TMP23:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0 // CHECK12-NEXT: [[TMP24:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0 // CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK12-NEXT: [[TMP25:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l84.region_id, i32 1, i8** [[TMP23]], i8** [[TMP24]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.11, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK12-NEXT: [[TMP25:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l84.region_id, i32 1, i8** [[TMP23]], i8** [[TMP24]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.14, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.15, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) // CHECK12-NEXT: [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0 // CHECK12-NEXT: br i1 [[TMP26]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]] // CHECK12: omp_offload.failed11: @@ -5783,11 +5691,11 @@ // CHECK12-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK12-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 // CHECK12-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 -// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..5 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..7 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK12-NEXT: ret void // // -// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..5 +// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..7 // CHECK12-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR3]] { // CHECK12-NEXT: entry: // CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -5866,11 +5774,11 @@ // CHECK12-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK12-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 // CHECK12-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 -// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..7 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK12-NEXT: ret void // // -// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..7 +// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..10 // CHECK12-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR3]] { // CHECK12-NEXT: entry: // CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -5949,11 +5857,11 @@ // CHECK12-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK12-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 // CHECK12-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 -// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..13 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]])
// CHECK12-NEXT: ret void
//
//
-// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..10
+// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..13
// CHECK12-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR3]] {
// CHECK12-NEXT: entry:
// CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4
diff --git a/clang/test/OpenMP/target_update_codegen.cpp b/clang/test/OpenMP/target_update_codegen.cpp
--- a/clang/test/OpenMP/target_update_codegen.cpp
+++ b/clang/test/OpenMP/target_update_codegen.cpp
@@ -43,7 +43,7 @@
// CK1: [[MTYPE03:@.+]] = {{.+}}constant [1 x i64] [i64 2]
-// CK1: [[SIZE04:@.+]] = {{.+}}constant [2 x i64] [i64 sdiv exact (i64 sub (i64 ptrtoint (double** getelementptr (double*, double** getelementptr inbounds (%struct.ST, %struct.ST* @gb, i32 0, i32 1), i32 1) to i64), i64 ptrtoint (double** getelementptr inbounds (%struct.ST, %struct.ST* @gb, i32 0, i32 1) to i64)), i64 ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)), i64 24]
+// CK1: [[SIZE04:@.+]] = {{.+}}global [2 x i64] [i64 0, i64 24]
// CK1: [[MTYPE04:@.+]] = {{.+}}constant [2 x i64] [i64 0, i64 281474976710673]
// CK1-LABEL: _Z3fooi
@@ -145,6 +145,7 @@
// CK1-DAG: [[CP0:%.+]] = bitcast i8** [[P0]] to double***
// CK1-DAG: store [[ST]]* @gb, [[ST]]** [[CBP0]]
// CK1-DAG: store double** getelementptr inbounds ([[ST]], [[ST]]* @gb, i32 0, i32 1), double*** [[CP0]]
+ // CK1-DAG: store i64 sdiv exact (i64 sub (i64 ptrtoint (double** getelementptr (double*, double** getelementptr inbounds (%struct.ST, %struct.ST* @gb, i32 0, i32 1), i32 1) to i64), i64 ptrtoint (double** getelementptr inbounds (%struct.ST, %struct.ST* @gb, i32 0, i32 1) to i64)), i64 ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)), i64* getelementptr inbounds ([2 x i64], [2 x i64]* [[SIZE04]], i32 0, i32 0),
// CK1-DAG: [[BP1:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 1
@@ -206,6 +207,7 @@
}
};
+// CK2: [[SIZE00:@.+]] = {{.+}}global [2 x i64] [i64 0, i64 24]
// CK2: [[MTYPE00:@.+]] = {{.+}}constant [2 x i64] [i64 0, i64 281474976710674]
// CK2-LABEL: _Z3bari
@@ -217,21 +219,19 @@
// Region 00
// CK2: br i1 %{{[^,]+}}, label %[[IFTHEN:[^,]+]], label %[[IFELSE:[^,]+]]
// CK2: [[IFTHEN]]
-// CK2-DAG: call void @__tgt_target_data_update_mapper(%struct.ident_t* @{{.+}}, i64 [[DEV:%[^,]+]], i32 2, i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], i[[sz:64|32]]* [[GEPS:%.+]], {{.+}}getelementptr {{.+}}[2 x i{{.+}}]* [[MTYPE00]]{{.+}}, i8** null)
+// CK2-DAG: call void @__tgt_target_data_update_mapper(%struct.ident_t* @{{.+}}, i64 [[DEV:%[^,]+]], i32 2, i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], {{.+}}getelementptr {{.+}}[2 x i{{.+}}]* [[SIZE00]]{{.+}}, {{.+}}getelementptr {{.+}}[2 x i{{.+}}]* [[MTYPE00]]{{.+}}, i8** null)
// CK2-DAG: [[DEV]] = sext i32 [[DEVi32:%[^,]+]] to i64
// CK2-DAG: [[DEVi32]] = load i32, i32* %{{[^,]+}},
// CK2-DAG: [[GEPBP]] = getelementptr inbounds {{.+}}[[BP:%[^,]+]]
// CK2-DAG: [[GEPP]] = getelementptr inbounds {{.+}}[[P:%[^,]+]]
-// CK2-DAG: [[GEPS]] = getelementptr inbounds {{.+}}[[S:%[^,]+]]
// CK2-DAG: [[BP0:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 0
// CK2-DAG: [[P0:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 0
-// CK2-DAG: [[S0:%.+]] = getelementptr 
inbounds {{.+}}[[S]], i{{.+}} 0, i{{.+}} 0 // CK2-DAG: [[CBP0:%.+]] = bitcast i8** [[BP0]] to [[ST]]** // CK2-DAG: [[CP0:%.+]] = bitcast i8** [[P0]] to double*** // CK2-DAG: store [[ST]]* [[VAR0:%[^,]+]], [[ST]]** [[CBP0]] // CK2-DAG: store double** [[SEC0:%[^,]+]], double*** [[CP0]] -// CK2-DAG: store i[[sz]] {{%.+}}, i[[sz]]* [[S0]] +// CK2-DAG: store i64 {{%.+}}, i64* getelementptr inbounds ([2 x i64], [2 x i64]* [[SIZE00]], i32 0, i32 0), // CK2-DAG: [[SEC0]] = getelementptr inbounds {{.*}}[[ST]]* [[VAR0]], i32 0, i32 1 @@ -536,24 +536,22 @@ double *p; }; +// CK9: [[SIZE00:@.+]] = {{.+}}global [2 x i64] [i64 0, i64 8] // CK9: [[MTYPE00:@.+]] = {{.+}}constant [2 x i64] [i64 0, i64 281474976710673] // CK9-LABEL: lvalue void lvalue(struct S *s, int l, int e) { - // CK9-DAG: call void @__tgt_target_data_update_mapper(%struct.ident_t* @{{.+}}, i64 -1, i32 2, i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], i{{.+}} [[GSIZE:%.+]], {{.+}}getelementptr {{.+}}[2 x i{{.+}}, [2 x i{{.+}}]* [[MTYPE00]]{{.+}}, i8** null) + // CK9-DAG: call void @__tgt_target_data_update_mapper(%struct.ident_t* @{{.+}}, i64 -1, i32 2, i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], {{.+}}getelementptr {{.+}}[2 x i{{.+}}, [2 x i{{.+}}]* [[SIZE00]]{{.+}}, {{.+}}getelementptr {{.+}}[2 x i{{.+}}, [2 x i{{.+}}]* [[MTYPE00]]{{.+}}, i8** null) // CK9-DAG: [[GEPBP]] = getelementptr inbounds {{.+}}[[BP:%[^,]+]] // CK9-DAG: [[GEPP]] = getelementptr inbounds {{.+}}[[P:%[^,]+]] - // CK9-DAG: [[GSIZE]] = getelementptr inbounds {{.+}}[[SIZE:%[^,]+]] // // CK9-DAG: [[BP0:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 1 // CK9-DAG: [[P0:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 1 - // CK9-DAG: [[SIZE0:%.+]] = getelementptr inbounds {{.+}}[[SIZE]], i{{.+}} 0, i{{.+}} 1 // CK9-DAG: [[BPC0:%.+]] = bitcast i8** [[BP0]] to double*** // CK9-DAG: [[PC0:%.+]] = bitcast i8** [[P0]] to double** // CK9-DAG: store double** [[P:%.+]], double*** [[BPC0]] // CK9-DAG: store double* [[P_VAL:%.+]], double** [[PC0]] - // CK9-DAG: store i{{.+}} 8, i{{.+}}* [[SIZE0]] // CK9-DAG: [[P]] = getelementptr inbounds [[STRUCT_S:%.+]], [[STRUCT_S]]* [[S_VAL:%.+]], i32 0, i32 0 // CK9-DAG: [[S_VAL]] = load [[STRUCT_S]]*, [[STRUCT_S]]** [[S_ADDR:%.+]] // CK9-DAG: [[P_VAL]] = load double*, double** [[P_1:%.+]], @@ -586,24 +584,22 @@ double *p; }; +// CK10: [[SIZE00:@.+]] = {{.+}}global [2 x i64] [i64 0, i64 8] // CK10: [[MTYPE00:@.+]] = {{.+}}constant [2 x i64] [i64 0, i64 281474976710673] // CK10-LABEL: lvalue void lvalue(struct S *s, int l, int e) { - // CK10-DAG: call void @__tgt_target_data_update_mapper(%struct.ident_t* @{{.+}}, i64 -1, i32 2, i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], i{{.+}} [[GSIZE:%.+]], {{.+}}getelementptr {{.+}}[2 x i{{.+}}, [2 x i{{.+}}]* [[MTYPE00]]{{.+}}, i8** null) + // CK10-DAG: call void @__tgt_target_data_update_mapper(%struct.ident_t* @{{.+}}, i64 -1, i32 2, i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], {{.+}}getelementptr {{.+}}[2 x i{{.+}}, [2 x i{{.+}}]* [[SIZE00]]{{.+}}, {{.+}}getelementptr {{.+}}[2 x i{{.+}}, [2 x i{{.+}}]* [[MTYPE00]]{{.+}}, i8** null) // CK10-DAG: [[GEPBP]] = getelementptr inbounds {{.+}}[[BP:%[^,]+]] // CK10-DAG: [[GEPP]] = getelementptr inbounds {{.+}}[[P:%[^,]+]] - // CK10-DAG: [[GSIZE]] = getelementptr inbounds {{.+}}[[SIZE:%[^,]+]] // // CK10-DAG: [[BP0:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 1 // CK10-DAG: [[P0:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 1 - // CK10-DAG: [[SIZE0:%.+]] = getelementptr inbounds {{.+}}[[SIZE]], i{{.+}} 0, i{{.+}} 1 // 
CK10-DAG: [[BPC0:%.+]] = bitcast i8** [[BP0]] to double*** // CK10-DAG: [[PC0:%.+]] = bitcast i8** [[P0]] to double** // CK10-DAG: store double** [[P_VAL:%.+]], double*** [[BPC0]] // CK10-DAG: store double* [[ADD_PTR:%.+]], double** [[PC0]] - // CK10-DAG: store i{{.+}} 8, i{{.+}}* [[SIZE0]] // CK10-64-DAG: [[ADD_PTR]] = getelementptr inbounds double, double* [[S_P:%.+]], i{{.+}} [[IDX_EXT:%.+]] // CK10-32-DAG: [[ADD_PTR]] = getelementptr inbounds double, double* [[S_P:%.+]], i{{.+}} [[L_VAL:%.+]] // CK10-64-DAG: [[IDX_EXT]] = sext i32 [[L_VAL:%.+]] to i64 @@ -636,24 +632,22 @@ struct S { double *p; }; +// CK11: [[SIZE00:@.+]] = {{.+}}global [2 x i64] [i64 0, i64 8] // CK11: [[MTYPE00:@.+]] = {{.+}}constant [2 x i64] [i64 0, i64 281474976710673] // CK11-LABEL: lvalue void lvalue(struct S *s, int l, int e) { - // CK11-DAG: call void @__tgt_target_data_update_mapper(%struct.ident_t* @{{.+}}, i64 -1, i32 2, i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], i{{.+}} [[GSIZE:%.+]], {{.+}}getelementptr {{.+}}[2 x i{{.+}}, [2 x i{{.+}}]* [[MTYPE00]]{{.+}}, i8** null) + // CK11-DAG: call void @__tgt_target_data_update_mapper(%struct.ident_t* @{{.+}}, i64 -1, i32 2, i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], {{.+}}getelementptr {{.+}}[2 x i{{.+}}, [2 x i{{.+}}]* [[SIZE00]]{{.+}}, {{.+}}getelementptr {{.+}}[2 x i{{.+}}, [2 x i{{.+}}]* [[MTYPE00]]{{.+}}, i8** null) // CK11-DAG: [[GEPBP]] = getelementptr inbounds {{.+}}[[BP:%[^,]+]] // CK11-DAG: [[GEPP]] = getelementptr inbounds {{.+}}[[P:%[^,]+]] - // CK11-DAG: [[GSIZE]] = getelementptr inbounds {{.+}}[[SIZE:%[^,]+]] // // CK11-DAG: [[BP0:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 1 // CK11-DAG: [[P0:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 1 - // CK11-DAG: [[SIZE0:%.+]] = getelementptr inbounds {{.+}}[[SIZE]], i{{.+}} 0, i{{.+}} 1 // CK11-DAG: [[BPC0:%.+]] = bitcast i8** [[BP0]] to double*** // CK11-DAG: [[PC0:%.+]] = bitcast i8** [[P0]] to double** // CK11-DAG: store double** [[P:%.+]], double*** [[BPC0]] // CK11-DAG: store double* [[ARRAY_IDX:%.+]], double** [[PC0]] - // CK11-DAG: store i{{.+}} 8, i{{.+}} [[SIZE0]] // CK11-DAG: [[P]] = getelementptr inbounds [[STRUCT_S:%.+]], [[STRUCT_S]]* [[SS_1:%.+]], i32 0, i32 0 // CK11-DAG: [[ARRAY_IDX]] = getelementptr inbounds double, double* [[ADD_PTR:%.+]], i{{.+}} 3 // CK11-64-DAG: [[ADD_PTR]] = getelementptr inbounds double, double* [[S_P:%.+]], i{{.+}} [[IDX_EXT:%.+]] @@ -688,41 +682,36 @@ double *p; struct S *sp; }; +// CK12: [[SIZE00:@.+]] = {{.+}}global [3 x i64] [i64 0, i64 {{4|8}}, i64 8] // CK12: [[MTYPE00:@.+]] = {{.+}}constant [3 x i64] [i64 0, i64 281474976710672, i64 17] // CK12-LABEL: lvalue void lvalue(struct S *s, int l, int e) { - // CK12-DAG: call void @__tgt_target_data_update_mapper(%struct.ident_t* @{{.+}}, i64 -1, i32 3, i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], i{{.+}} [[GSIZE:%.+]], {{.+}}getelementptr {{.+}}[3 x i{{.+}}, [3 x i{{.+}}]* [[MTYPE00]]{{.+}}, i8** null) + // CK12-DAG: call void @__tgt_target_data_update_mapper(%struct.ident_t* @{{.+}}, i64 -1, i32 3, i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], {{.+}}getelementptr {{.+}}[3 x i{{.+}}, [3 x i{{.+}}]* [[SIZE00]]{{.+}}, {{.+}}getelementptr {{.+}}[3 x i{{.+}}, [3 x i{{.+}}]* [[MTYPE00]]{{.+}}, i8** null) // CK12-DAG: [[GEPBP]] = getelementptr inbounds {{.+}}[[BP:%[^,]+]] // CK12-DAG: [[GEPP]] = getelementptr inbounds {{.+}}[[P:%[^,]+]] - // CK12-DAG: [[GSIZE]] = getelementptr inbounds {{.+}}[[SIZE:%[^,]+]] // // CK12-DAG: [[BP2:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 2 // CK12-DAG: 
[[P2:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 2 - // CK12-DAG: [[SIZE2:%.+]] = getelementptr inbounds {{.+}}[[SIZE]], i{{.+}} 0, i{{.+}} 2 // CK12-DAG: [[BPC2:%.+]] = bitcast i8** [[BP2]] to double*** // CK12-DAG: [[PC2:%.+]] = bitcast i8** [[P2]] to double** // CK12-DAG: store double** [[P_VAL:%.+]], double*** [[BPC2]] // CK12-DAG: store double* [[SIX:%.+]], double** [[PC2]] - // CK12-DAG: store i{{.+}} 8, i{{.+}}* [[SIZE2]] // CK12-DAG: [[BP1:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 1 // CK12-DAG: [[P1:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 1 - // CK12-DAG: [[SIZE1:%.+]] = getelementptr inbounds {{.+}}[[SIZE]], i{{.+}} 0, i{{.+}} 1 // CK12-DAG: [[BPC1:%.+]] = bitcast i8** [[BP1]] to [[STRUCT_S:%.+]]*** // CK12-DAG: [[PC1:%.+]] = bitcast i8** [[P1]] to double*** // CK12-DAG: store [[STRUCT_S]]** [[SP:%.+]], [[STRUCT_S]]*** [[BPC1]] // CK12-DAG: store double** [[P_VAL:%.+]], double*** [[PC1]] - // CK12-DAG: store i{{.+}} {{4|8}}, i{{.+}}* [[SIZE1]] // CK12-DAG: [[BP0:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 0 // CK12-DAG: [[P0:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 0 - // CK12-DAG: [[SIZE0:%.+]] = getelementptr inbounds {{.+}}[[SIZE]], i{{.+}} 0, i{{.+}} 0 // CK12-DAG: [[BPC0:%.+]] = bitcast i8** [[BP0]] to [[STRUCT_S:%.+]]** // CK12-DAG: [[PC0:%.+]] = bitcast i8** [[P0]] to [[STRUCT_S]]*** // CK12-DAG: store [[STRUCT_S]]* [[ZERO:%.+]], [[STRUCT_S]]** [[BPC0]] // CK12-DAG: store [[STRUCT_S]]** [[SP]], [[STRUCT_S]]*** [[PC0]] // CK12-DAG: store [[STRUCT_S]]** [[S:%.+]], [[STRUCT_S]]*** [[S_VAL:%.+]] - // CK12-DAG: store i{{.+}} {{.+}}, i{{.+}}* [[SIZE0]] + // CK12-DAG: store i{{.+}} {{.+}}, i{{.+}}* getelementptr inbounds ([3 x i64], [3 x i64]* [[SIZE00]], i32 0, i32 0), // CK12-DAG: [[SP]] = getelementptr inbounds [[STRUCT_S]], [[STRUCT_S]]* [[ONE:%.+]], i32 0, i32 1 // CK12-DAG: [[ONE]] = load [[STRUCT_S]]*, [[STRUCT_S]]** [[S:%.+]], // CK12-DAG: [[ZERO]] = load [[STRUCT_S]]*, [[STRUCT_S]]** [[S]], @@ -797,6 +786,7 @@ // SIMD-ONLY0-NOT: {{__kmpc|__tgt}} #ifdef CK14 +// CK14: [[SIZE00:@.+]] = {{.+}}global [2 x i64] [i64 0, i64 8] // CK14: [[MTYPE00:@.+]] = private {{.*}}constant [2 x i64] [i64 0, i64 281474976710673] struct SSA { @@ -814,30 +804,26 @@ // CK14-LABEL: define {{.+}}foo void foo() { - // CK14-DAG: call void @__tgt_target_data_update_mapper(%struct.ident_t* @{{.+}}, i64 -1, i32 2, i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], i64* [[GSIZE:%.+]], {{.+}}getelementptr {{.+}}[2 x i{{.+}}]* [[MTYPE00]]{{.+}}, i8** null) + // CK14-DAG: call void @__tgt_target_data_update_mapper(%struct.ident_t* @{{.+}}, i64 -1, i32 2, i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], {{.+}}getelementptr {{.+}}[2 x i{{.+}}]* [[SIZE00]]{{.+}}, {{.+}}getelementptr {{.+}}[2 x i{{.+}}]* [[MTYPE00]]{{.+}}, i8** null) // CK14-DAG: [[GEPBP]] = getelementptr inbounds {{.+}}[[BP:%[^,]+]] // CK14-DAG: [[GEPP]] = getelementptr inbounds {{.+}}[[P:%[^,]+]] - // CK14-DAG: [[GSIZE]] = getelementptr inbounds {{.+}}[[SIZE:%[^,]+]] // CK14-DAG: [[BP1:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 1 // CK14-DAG: [[P1:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 1 - // CK14-DAG: [[SIZE1:%.+]] = getelementptr inbounds {{.+}}[[SIZE]], i{{.+}} 0, i{{.+}} 1 // CK14-DAG: [[BPC1:%.+]] = bitcast i8** [[BP1]] to double*** // CK14-DAG: [[PC1:%.+]] = bitcast i8** [[P1]] to double** // CK14-DAG: store double** [[D_VAL:%.+]], double*** [[BPC1]] // CK14-DAG: store double* [[ADD_PTR:%.+]], double** 
[[PC1]] - // CK14-DAG: store i64 8, i64* [[SIZE1]] // CK14-DAG: [[ADD_PTR]] = getelementptr inbounds double, double* [[ZERO:%.+]], i{{.+}} 1 // CK14-DAG: [[ZERO]] = load double*, double** [[D_VAL_2:%.+]] // CK14-DAG: [[D_VAL]] = getelementptr inbounds [[SSB:%.+]], [[SSB:%.+]]* [[THIS:%.+]], i32 0, i32 0 // CK14-DAG: [[BP0:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 0 // CK14-DAG: [[P0:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 0 - // CK14-DAG: [[SIZE0:%.+]] = getelementptr inbounds {{.+}}[[SIZE]], i{{.+}} 0, i{{.+}} 0 // CK14-DAG: [[BPC0:%.+]] = bitcast i8** [[BP0]] to [[SSB]]** // CK14-DAG: [[PC0:%.+]] = bitcast i8** [[P0]] to double*** // CK14-DAG: store [[SSB]]* [[THIS]], [[SSB]]** [[BPC0]], // CK14-DAG: store double** [[D_VAL]], double*** [[PC0]] - // CK14-DAG: store i{{.+}} [[COMPUTE_LEN:%.+]], i{{.+}}* [[SIZE0]] + // CK14-DAG: store i{{.+}} [[COMPUTE_LEN:%.+]], i{{.+}}* getelementptr inbounds ([2 x i64], [2 x i64]* [[SIZE00]], i32 0, i32 0), #pragma omp target update to(*(this->d+1)) *(this->d+1) = 1; @@ -870,6 +856,7 @@ // SIMD-ONLY0-NOT: {{__kmpc|__tgt}} #ifdef CK15 +// CK15: [[SIZE00:@.+]] = {{.+}}global [2 x i64] [i64 0, i64 {{8|4}}] // CK15: [[MTYPE00:@.+]] = {{.+}}constant [2 x i64] [i64 0, i64 281474976710673] struct SSA { @@ -881,30 +868,26 @@ //CK-15-LABEL: lvalue_member void lvalue_member(SSA *sap) { - // CK15-DAG: call void @__tgt_target_data_update_mapper(%struct.ident_t* @{{.+}}, i64 -1, i32 2, i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], i64* [[GSIZE:%.+]], {{.+}}getelementptr {{.+}}[2 x i{{.+}}]* [[MTYPE00]]{{.+}}, i8** null) + // CK15-DAG: call void @__tgt_target_data_update_mapper(%struct.ident_t* @{{.+}}, i64 -1, i32 2, i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], {{.+}}getelementptr {{.+}}[2 x i{{.+}}]* [[SIZE00]]{{.+}}, {{.+}}getelementptr {{.+}}[2 x i{{.+}}]* [[MTYPE00]]{{.+}}, i8** null) // CK15-DAG: [[GEPBP]] = getelementptr inbounds {{.+}}[[BP:%[^,]+]] // CK15-DAG: [[GEPP]] = getelementptr inbounds {{.+}}[[P:%[^,]+]] - // CK15-DAG: [[GSIZE]] = getelementptr inbounds {{.+}}[[SIZE:%[^,]+]] // CK15-DAG: [[BP1:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 1 // CK15-DAG: [[P1:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 1 - // CK15-DAG: [[SIZE1:%.+]] = getelementptr inbounds {{.+}}[[SIZE]], i{{.+}} 0, i{{.+}} 1 // CK15-DAG: [[BPC1:%.+]] = bitcast i8** [[BP1]] to double*** // CK15-DAG: [[PC1:%.+]] = bitcast i8** [[P1]] to double** // CK15-DAG: store double** [[P_VAL:%.+]], double*** [[BPC1]] // CK15-DAG: store double* [[ADD_PTR:%.+]], double** [[PC1]] - // CK15-DAG: store i64 {{4|8}}, i64* [[SIZE1]] // CK15-DAG: [[ADD_PTR]] = getelementptr inbounds double, double* [[THREE:%.+]], i{{.+}} 3 // CK15-DAG: [[THREE]] = load double*, double** [[P_VAL_1:%.+]] // CK15-DAG: [[P_VAL]] = getelementptr inbounds [[SSA:%.+]], [[SSA:%.+]]* [[THIS:%.+]], i32 0, i32 0 // CK15-DAG: [[BP0:%.+]] = getelementptr inbounds {{.+}}[[BP]], i{{.+}} 0, i{{.+}} 0 // CK15-DAG: [[P0:%.+]] = getelementptr inbounds {{.+}}[[P]], i{{.+}} 0, i{{.+}} 0 - // CK15-DAG: [[SIZE0:%.+]] = getelementptr inbounds {{.+}}[[SIZE]], i{{.+}} 0, i{{.+}} 0 // CK15-DAG: [[BPC0:%.+]] = bitcast i8** [[BP0]] to [[SSA]]** // CK15-DAG: store [[SSA]]* [[ZERO:%.+]], [[SSA]]** [[BPC0]], // CK15-DAG: [[PC0:%.+]] = bitcast i8** [[P0]] to double*** // CK15-DAG: store double** [[P_VAL]], double*** [[PC0]], - // CK15-DAG: store i{{.+}} [[COMPUTE_SIZE:%.+]], i{{.+}}* [[SIZE0]] + // CK15-DAG: store i{{.+}} [[COMPUTE_SIZE:%.+]], i{{.+}}* getelementptr inbounds ([2 x i64], 
[2 x i64]* [[SIZE00]], i32 0, i32 0),
// CK15-DAG: [[COMPUTE_SIZE]] = sdiv exact i64 [[NINE:%.+]], ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)
// CK15-DAG: [[NINE]] = sub i{{.+}} [[SEVEN:%.+]], [[EIGHT:%.+]]
// CK15-DAG: [[SEVEN]] = ptrtoint i8* [[SIX:%.+]] to i64
@@ -1255,6 +1238,7 @@
// CK21: [[STRUCT_ST:%.+]] = type { [10 x [10 x [10 x double*]]] }
// CK21: [[STRUCT_DESCRIPTOR:%.+]] = type { i64, i64, i64 }
+// CK21: [[SIZE:@.+]] = private unnamed_addr global [2 x i64] zeroinitializer
// CK21: [[MTYPE:@.+]] = {{.+}}constant [2 x i64] [i64 0, i64 299067162755073]
struct ST {
@@ -1298,7 +1282,7 @@
// CK21: store i64 1, i64* [[COUNT_4]],
// CK21: [[STRIDE_4:%.+]] = getelementptr inbounds [[STRUCT_DESCRIPTOR]], [[STRUCT_DESCRIPTOR]]* [[DIM_4]], {{.+}} 0, {{.+}} 2
// CK21: store i64 {{4|8}}, i64* [[STRIDE_4]],
- // CK21-DAG: call void @__tgt_target_data_update_mapper(%struct.ident_t* @{{.+}}, i64 -1, i32 2, i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], i{{.+}}* [[GEPSZ:%.+]], {{.+}}getelementptr {{.+}}[2 x i{{.+}}]* [[MTYPE]]{{.+}})
+ // CK21-DAG: call void @__tgt_target_data_update_mapper(%struct.ident_t* @{{.+}}, i64 -1, i32 2, i8** [[GEPBP:%.+]], i8** [[GEPP:%.+]], {{.+}}getelementptr {{.+}}[2 x i{{.+}}]* [[SIZE]]{{.+}}, {{.+}}getelementptr {{.+}}[2 x i{{.+}}]* [[MTYPE]]{{.+}})
// CK21-DAG: [[GEPBP]] = getelementptr inbounds {{.+}}[[BP]]
// CK21-DAG: [[GEPP]] = getelementptr inbounds {{.+}}[[P:%[^,]+]]
// CK21-DAG: [[PC0:%.+]] = bitcast [4 x [[STRUCT_DESCRIPTOR]]]* [[DIMS]] to i8*
diff --git a/clang/test/OpenMP/target_update_depend_codegen.cpp b/clang/test/OpenMP/target_update_depend_codegen.cpp
--- a/clang/test/OpenMP/target_update_depend_codegen.cpp
+++ b/clang/test/OpenMP/target_update_depend_codegen.cpp
@@ -37,7 +37,7 @@
// CK1: [[MTYPE03:@.+]] = {{.+}}constant [1 x i64] [i64 2]
-// CK1: [[SIZE04:@.+]] = {{.+}}constant [2 x i64] [i64 sdiv exact (i64 sub (i64 ptrtoint (double** getelementptr (double*, double** getelementptr inbounds (%struct.ST, %struct.ST* @gb, i32 0, i32 1), i32 1) to i64), i64 ptrtoint (double** getelementptr inbounds (%struct.ST, %struct.ST* @gb, i32 0, i32 1) to i64)), i64 ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)), i64 24]
+// CK1: [[SIZE04:@.+]] = {{.+}}global [2 x i64] [i64 0, i64 24]
// CK1: [[MTYPE04:@.+]] = {{.+}}constant [2 x i64] [i64 0, i64 281474976710673]
// CK1-LABEL: _Z3fooi
@@ -290,6 +290,7 @@
// CK1: [[P0:%.+]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[P:%.+]], i32 0, i32 0
// CK1: [[P0_BC:%.+]] = bitcast i8** [[P0]] to double***
// CK1: store double** getelementptr inbounds (%struct.ST, %struct.ST* @gb, i32 0, i32 1), double*** [[P0_BC]],
+ // CK1: store i64 sdiv exact (i64 sub (i64 ptrtoint (double** getelementptr (double*, double** getelementptr inbounds (%struct.ST, %struct.ST* @gb, i32 0, i32 1), i32 1) to i64), i64 ptrtoint (double** getelementptr inbounds (%struct.ST, %struct.ST* @gb, i32 0, i32 1) to i64)), i64 ptrtoint (i8* getelementptr (i8, i8* null, i32 1) to i64)), i64* getelementptr inbounds ([2 x i64], [2 x i64]* [[SIZE04]], i32 0, i32 0),
// CK1: [[BP1:%.+]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[BP]], i32 0, i32 1
// CK1: [[BP1_BC:%.+]] = bitcast i8** [[BP1]] to double***
// CK1: store double** getelementptr inbounds (%struct.ST, %struct.ST* @gb, i32 0, i32 1), double*** [[BP1_BC]],
diff --git a/clang/test/OpenMP/teams_distribute_codegen.cpp b/clang/test/OpenMP/teams_distribute_codegen.cpp
--- a/clang/test/OpenMP/teams_distribute_codegen.cpp
+++ b/clang/test/OpenMP/teams_distribute_codegen.cpp
@@ -1621,7 +1621,6 @@ // CHECK9-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK9-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK9-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8 -// CHECK9-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 8 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -1643,56 +1642,50 @@ // CHECK9-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK9-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i64* // CHECK9-NEXT: store i64 [[TMP4]], i64* [[TMP9]], align 8 -// CHECK9-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK9-NEXT: store i64 4, i64* [[TMP10]], align 8 -// CHECK9-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK9-NEXT: store i8* null, i8** [[TMP11]], align 8 -// CHECK9-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK9-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64* -// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP13]], align 8 -// CHECK9-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK9-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64* -// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP15]], align 8 -// CHECK9-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK9-NEXT: store i64 8, i64* [[TMP16]], align 8 -// CHECK9-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK9-NEXT: store i8* null, i8** [[TMP17]], align 8 -// CHECK9-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK9-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK9-NEXT: store i8* null, i8** [[TMP10]], align 8 +// CHECK9-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK9-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i64* +// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP12]], align 8 +// CHECK9-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK9-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i64* +// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP14]], align 8 +// CHECK9-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK9-NEXT: store i8* null, i8** [[TMP15]], align 8 +// CHECK9-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK9-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** +// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 8 +// CHECK9-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK9-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** // CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 8 -// CHECK9-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK9-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** -// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 8 -// 
CHECK9-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK9-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 8 -// CHECK9-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK9-NEXT: store i8* null, i8** [[TMP23]], align 8 -// CHECK9-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4 -// CHECK9-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK9-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 +// CHECK9-NEXT: store i64 [[TMP5]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 2), align 8 +// CHECK9-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK9-NEXT: store i8* null, i8** [[TMP20]], align 8 +// CHECK9-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP23:%.*]] = load i32, i32* [[N]], align 4 +// CHECK9-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK9-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK9-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK9-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK9-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK9-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1 -// CHECK9-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 -// CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[TMP30]]) -// CHECK9-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z15teams_local_argv_l73.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK9-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 -// CHECK9-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK9-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], 1 +// CHECK9-NEXT: [[TMP26:%.*]] = zext i32 [[ADD]] to i64 +// CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[TMP26]]) +// CHECK9-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z15teams_local_argv_l73.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK9-NEXT: [[TMP28:%.*]] = icmp ne 
i32 [[TMP27]], 0 +// CHECK9-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK9: omp_offload.failed: // CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z15teams_local_argv_l73(i64 [[TMP4]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK9: omp_offload.cont: // CHECK9-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[VLA]], i64 0 -// CHECK9-NEXT: [[TMP33:%.*]] = load i32, i32* [[ARRAYIDX]], align 4 -// CHECK9-NEXT: [[TMP34:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK9-NEXT: call void @llvm.stackrestore(i8* [[TMP34]]) -// CHECK9-NEXT: ret i32 [[TMP33]] +// CHECK9-NEXT: [[TMP29:%.*]] = load i32, i32* [[ARRAYIDX]], align 4 +// CHECK9-NEXT: [[TMP30:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK9-NEXT: call void @llvm.stackrestore(i8* [[TMP30]]) +// CHECK9-NEXT: ret i32 [[TMP29]] // // // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z15teams_local_argv_l73 @@ -1823,7 +1816,6 @@ // CHECK10-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK10-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK10-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8 -// CHECK10-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 8 // CHECK10-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK10-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK10-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -1845,56 +1837,50 @@ // CHECK10-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK10-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i64* // CHECK10-NEXT: store i64 [[TMP4]], i64* [[TMP9]], align 8 -// CHECK10-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK10-NEXT: store i64 4, i64* [[TMP10]], align 8 -// CHECK10-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK10-NEXT: store i8* null, i8** [[TMP11]], align 8 -// CHECK10-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK10-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64* -// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP13]], align 8 -// CHECK10-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK10-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64* -// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP15]], align 8 -// CHECK10-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK10-NEXT: store i64 8, i64* [[TMP16]], align 8 -// CHECK10-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK10-NEXT: store i8* null, i8** [[TMP17]], align 8 -// CHECK10-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK10-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK10-NEXT: store i8* null, i8** [[TMP10]], align 8 +// CHECK10-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK10-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i64* +// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP12]], align 8 +// CHECK10-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x 
i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK10-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i64* +// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP14]], align 8 +// CHECK10-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK10-NEXT: store i8* null, i8** [[TMP15]], align 8 +// CHECK10-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK10-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** +// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 8 +// CHECK10-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK10-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** // CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 8 -// CHECK10-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK10-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** -// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 8 -// CHECK10-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK10-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 8 -// CHECK10-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK10-NEXT: store i8* null, i8** [[TMP23]], align 8 -// CHECK10-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4 -// CHECK10-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK10-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK10-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 +// CHECK10-NEXT: store i64 [[TMP5]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 2), align 8 +// CHECK10-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK10-NEXT: store i8* null, i8** [[TMP20]], align 8 +// CHECK10-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP23:%.*]] = load i32, i32* [[N]], align 4 +// CHECK10-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK10-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK10-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK10-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK10-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK10-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK10-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK10-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1 -// CHECK10-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 -// CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[TMP30]]) -// CHECK10-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z15teams_local_argv_l73.region_id, i32 3, i8** 
[[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK10-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 -// CHECK10-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK10-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK10-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], 1 +// CHECK10-NEXT: [[TMP26:%.*]] = zext i32 [[ADD]] to i64 +// CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[TMP26]]) +// CHECK10-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z15teams_local_argv_l73.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK10-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK10-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK10: omp_offload.failed: // CHECK10-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z15teams_local_argv_l73(i64 [[TMP4]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK10-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK10: omp_offload.cont: // CHECK10-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[VLA]], i64 0 -// CHECK10-NEXT: [[TMP33:%.*]] = load i32, i32* [[ARRAYIDX]], align 4 -// CHECK10-NEXT: [[TMP34:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK10-NEXT: call void @llvm.stackrestore(i8* [[TMP34]]) -// CHECK10-NEXT: ret i32 [[TMP33]] +// CHECK10-NEXT: [[TMP29:%.*]] = load i32, i32* [[ARRAYIDX]], align 4 +// CHECK10-NEXT: [[TMP30:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK10-NEXT: call void @llvm.stackrestore(i8* [[TMP30]]) +// CHECK10-NEXT: ret i32 [[TMP29]] // // // CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z15teams_local_argv_l73 @@ -2025,7 +2011,6 @@ // CHECK11-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK11-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK11-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4 -// CHECK11-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 4 // CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -2046,56 +2031,50 @@ // CHECK11-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK11-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i32* // CHECK11-NEXT: store i32 [[TMP3]], i32* [[TMP9]], align 4 -// CHECK11-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK11-NEXT: store i64 4, i64* [[TMP10]], align 4 -// CHECK11-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK11-NEXT: store i8* null, i8** [[TMP11]], align 4 -// CHECK11-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK11-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32* -// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP13]], align 4 -// CHECK11-NEXT: [[TMP14:%.*]] = getelementptr 
inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK11-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32* -// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP15]], align 4 -// CHECK11-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK11-NEXT: store i64 4, i64* [[TMP16]], align 4 -// CHECK11-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK11-NEXT: store i8* null, i8** [[TMP17]], align 4 -// CHECK11-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK11-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK11-NEXT: store i8* null, i8** [[TMP10]], align 4 +// CHECK11-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK11-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i32* +// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP12]], align 4 +// CHECK11-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK11-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i32* +// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP14]], align 4 +// CHECK11-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK11-NEXT: store i8* null, i8** [[TMP15]], align 4 +// CHECK11-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK11-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** +// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 4 +// CHECK11-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK11-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** // CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 4 -// CHECK11-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK11-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** -// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 4 -// CHECK11-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK11-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 4 -// CHECK11-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK11-NEXT: store i8* null, i8** [[TMP23]], align 4 -// CHECK11-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4 -// CHECK11-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK11-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 +// CHECK11-NEXT: store i64 [[TMP5]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 2), align 4 +// CHECK11-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK11-NEXT: store i8* null, i8** [[TMP20]], align 4 +// CHECK11-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 
x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP23:%.*]] = load i32, i32* [[N]], align 4 +// CHECK11-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK11-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK11-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK11-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK11-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK11-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1 -// CHECK11-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 -// CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[TMP30]]) -// CHECK11-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z15teams_local_argv_l73.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK11-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 -// CHECK11-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK11-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], 1 +// CHECK11-NEXT: [[TMP26:%.*]] = zext i32 [[ADD]] to i64 +// CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[TMP26]]) +// CHECK11-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z15teams_local_argv_l73.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK11-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK11-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK11: omp_offload.failed: // CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z15teams_local_argv_l73(i32 [[TMP3]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK11: omp_offload.cont: // CHECK11-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[VLA]], i32 0 -// CHECK11-NEXT: [[TMP33:%.*]] = load i32, i32* [[ARRAYIDX]], align 4 -// CHECK11-NEXT: [[TMP34:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK11-NEXT: call void @llvm.stackrestore(i8* [[TMP34]]) -// CHECK11-NEXT: ret i32 [[TMP33]] +// CHECK11-NEXT: [[TMP29:%.*]] = load i32, i32* [[ARRAYIDX]], align 4 +// CHECK11-NEXT: [[TMP30:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK11-NEXT: call void @llvm.stackrestore(i8* [[TMP30]]) +// CHECK11-NEXT: ret i32 [[TMP29]] // // // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z15teams_local_argv_l73 @@ -2224,7 +2203,6 @@ // CHECK12-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK12-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK12-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4 -// 
CHECK12-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 4 // CHECK12-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK12-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK12-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -2245,56 +2223,50 @@ // CHECK12-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK12-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i32* // CHECK12-NEXT: store i32 [[TMP3]], i32* [[TMP9]], align 4 -// CHECK12-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK12-NEXT: store i64 4, i64* [[TMP10]], align 4 -// CHECK12-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK12-NEXT: store i8* null, i8** [[TMP11]], align 4 -// CHECK12-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK12-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32* -// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP13]], align 4 -// CHECK12-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK12-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32* -// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP15]], align 4 -// CHECK12-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK12-NEXT: store i64 4, i64* [[TMP16]], align 4 -// CHECK12-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK12-NEXT: store i8* null, i8** [[TMP17]], align 4 -// CHECK12-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK12-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK12-NEXT: store i8* null, i8** [[TMP10]], align 4 +// CHECK12-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK12-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i32* +// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP12]], align 4 +// CHECK12-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK12-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i32* +// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP14]], align 4 +// CHECK12-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK12-NEXT: store i8* null, i8** [[TMP15]], align 4 +// CHECK12-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK12-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** +// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 4 +// CHECK12-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK12-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** // CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 4 -// CHECK12-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK12-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** -// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 4 -// CHECK12-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK12-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 4 -// CHECK12-NEXT: 
[[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK12-NEXT: store i8* null, i8** [[TMP23]], align 4 -// CHECK12-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4 -// CHECK12-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK12-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK12-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 +// CHECK12-NEXT: store i64 [[TMP5]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 2), align 4 +// CHECK12-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK12-NEXT: store i8* null, i8** [[TMP20]], align 4 +// CHECK12-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP23:%.*]] = load i32, i32* [[N]], align 4 +// CHECK12-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK12-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK12-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK12-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK12-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK12-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK12-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK12-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1 -// CHECK12-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 -// CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[TMP30]]) -// CHECK12-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z15teams_local_argv_l73.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK12-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 -// CHECK12-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK12-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK12-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], 1 +// CHECK12-NEXT: [[TMP26:%.*]] = zext i32 [[ADD]] to i64 +// CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[TMP26]]) +// CHECK12-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z15teams_local_argv_l73.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK12-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK12-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK12: omp_offload.failed: // 
CHECK12-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z15teams_local_argv_l73(i32 [[TMP3]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK12-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK12: omp_offload.cont: // CHECK12-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[VLA]], i32 0 -// CHECK12-NEXT: [[TMP33:%.*]] = load i32, i32* [[ARRAYIDX]], align 4 -// CHECK12-NEXT: [[TMP34:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK12-NEXT: call void @llvm.stackrestore(i8* [[TMP34]]) -// CHECK12-NEXT: ret i32 [[TMP33]] +// CHECK12-NEXT: [[TMP29:%.*]] = load i32, i32* [[ARRAYIDX]], align 4 +// CHECK12-NEXT: [[TMP30:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK12-NEXT: call void @llvm.stackrestore(i8* [[TMP30]]) +// CHECK12-NEXT: ret i32 [[TMP29]] // // // CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z15teams_local_argv_l73 @@ -2936,7 +2908,6 @@ // CHECK25-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK25-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK25-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8 -// CHECK25-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 8 // CHECK25-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK25-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK25-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -2961,47 +2932,41 @@ // CHECK25-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK25-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i64* // CHECK25-NEXT: store i64 [[TMP4]], i64* [[TMP9]], align 8 -// CHECK25-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK25-NEXT: store i64 4, i64* [[TMP10]], align 8 -// CHECK25-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK25-NEXT: store i8* null, i8** [[TMP11]], align 8 -// CHECK25-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK25-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64* -// CHECK25-NEXT: store i64 [[TMP1]], i64* [[TMP13]], align 8 -// CHECK25-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK25-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64* -// CHECK25-NEXT: store i64 [[TMP1]], i64* [[TMP15]], align 8 -// CHECK25-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK25-NEXT: store i64 8, i64* [[TMP16]], align 8 -// CHECK25-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK25-NEXT: store i8* null, i8** [[TMP17]], align 8 -// CHECK25-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK25-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK25-NEXT: store i8* null, i8** [[TMP10]], align 8 +// CHECK25-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK25-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i64* +// CHECK25-NEXT: store i64 [[TMP1]], i64* [[TMP12]], align 8 +// CHECK25-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK25-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i64* +// CHECK25-NEXT: 
store i64 [[TMP1]], i64* [[TMP14]], align 8 +// CHECK25-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK25-NEXT: store i8* null, i8** [[TMP15]], align 8 +// CHECK25-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK25-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** +// CHECK25-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 8 +// CHECK25-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK25-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** // CHECK25-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 8 -// CHECK25-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK25-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** -// CHECK25-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 8 -// CHECK25-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK25-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 8 -// CHECK25-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK25-NEXT: store i8* null, i8** [[TMP23]], align 8 -// CHECK25-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK25-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK25-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK25-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4 -// CHECK25-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK25-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK25-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 +// CHECK25-NEXT: store i64 [[TMP5]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 2), align 8 +// CHECK25-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK25-NEXT: store i8* null, i8** [[TMP20]], align 8 +// CHECK25-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK25-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK25-NEXT: [[TMP23:%.*]] = load i32, i32* [[N]], align 4 +// CHECK25-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK25-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK25-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK25-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK25-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK25-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK25-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK25-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1 -// CHECK25-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 -// CHECK25-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[TMP30]]) -// CHECK25-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l162.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** 
null, i32 0, i32 0) -// CHECK25-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 -// CHECK25-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK25-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK25-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], 1 +// CHECK25-NEXT: [[TMP26:%.*]] = zext i32 [[ADD]] to i64 +// CHECK25-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[TMP26]]) +// CHECK25-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l162.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK25-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK25-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK25: omp_offload.failed: // CHECK25-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l162(i64 [[TMP4]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK25-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -3009,10 +2974,10 @@ // CHECK25-NEXT: [[TMP33:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 // CHECK25-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP33]]) // CHECK25-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK25-NEXT: [[TMP34:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK25-NEXT: call void @llvm.stackrestore(i8* [[TMP34]]) -// CHECK25-NEXT: [[TMP35:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK25-NEXT: ret i32 [[TMP35]] +// CHECK25-NEXT: [[TMP30:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK25-NEXT: call void @llvm.stackrestore(i8* [[TMP30]]) +// CHECK25-NEXT: [[TMP31:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK25-NEXT: ret i32 [[TMP31]] // // // CHECK25-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l162 @@ -3179,7 +3144,7 @@ // CHECK25-NEXT: [[TMP21:%.*]] = load i32, i32* [[TE]], align 4 // CHECK25-NEXT: [[TMP22:%.*]] = load i32, i32* [[TH]], align 4 // CHECK25-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK25-NEXT: [[TMP23:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l151.region_id, i32 3, i8** [[TMP19]], i8** [[TMP20]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.2, i32 0, i32 0), i8** null, i8** null, i32 [[TMP21]], i32 [[TMP22]]) +// CHECK25-NEXT: [[TMP23:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l151.region_id, i32 3, i8** [[TMP19]], i8** [[TMP20]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.2, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.3, i32 0, i32 0), i8** null, i8** null, i32 [[TMP21]], i32 [[TMP22]]) // CHECK25-NEXT: [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0 // CHECK25-NEXT: br i1 [[TMP24]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK25: omp_offload.failed: @@ -3296,7 +3261,6 @@ // CHECK26-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] 
= alloca [3 x i8*], align 8 // CHECK26-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK26-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8 -// CHECK26-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 8 // CHECK26-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK26-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK26-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -3321,47 +3285,41 @@ // CHECK26-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK26-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i64* // CHECK26-NEXT: store i64 [[TMP4]], i64* [[TMP9]], align 8 -// CHECK26-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK26-NEXT: store i64 4, i64* [[TMP10]], align 8 -// CHECK26-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK26-NEXT: store i8* null, i8** [[TMP11]], align 8 -// CHECK26-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK26-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64* -// CHECK26-NEXT: store i64 [[TMP1]], i64* [[TMP13]], align 8 -// CHECK26-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK26-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64* -// CHECK26-NEXT: store i64 [[TMP1]], i64* [[TMP15]], align 8 -// CHECK26-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK26-NEXT: store i64 8, i64* [[TMP16]], align 8 -// CHECK26-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK26-NEXT: store i8* null, i8** [[TMP17]], align 8 -// CHECK26-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK26-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK26-NEXT: store i8* null, i8** [[TMP10]], align 8 +// CHECK26-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK26-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i64* +// CHECK26-NEXT: store i64 [[TMP1]], i64* [[TMP12]], align 8 +// CHECK26-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK26-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i64* +// CHECK26-NEXT: store i64 [[TMP1]], i64* [[TMP14]], align 8 +// CHECK26-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK26-NEXT: store i8* null, i8** [[TMP15]], align 8 +// CHECK26-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK26-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** +// CHECK26-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 8 +// CHECK26-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK26-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** // CHECK26-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 8 -// CHECK26-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK26-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** -// CHECK26-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 8 -// CHECK26-NEXT: 
[[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK26-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 8 -// CHECK26-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK26-NEXT: store i8* null, i8** [[TMP23]], align 8 -// CHECK26-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK26-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK26-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK26-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4 -// CHECK26-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK26-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK26-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 +// CHECK26-NEXT: store i64 [[TMP5]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 2), align 8 +// CHECK26-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK26-NEXT: store i8* null, i8** [[TMP20]], align 8 +// CHECK26-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK26-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK26-NEXT: [[TMP23:%.*]] = load i32, i32* [[N]], align 4 +// CHECK26-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK26-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK26-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK26-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK26-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK26-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK26-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK26-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1 -// CHECK26-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 -// CHECK26-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[TMP30]]) -// CHECK26-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l162.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK26-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 -// CHECK26-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK26-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK26-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], 1 +// CHECK26-NEXT: [[TMP26:%.*]] = zext i32 [[ADD]] to i64 +// CHECK26-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[TMP26]]) +// CHECK26-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l162.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK26-NEXT: [[TMP28:%.*]] = icmp ne i32 
[[TMP27]], 0 +// CHECK26-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK26: omp_offload.failed: // CHECK26-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l162(i64 [[TMP4]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK26-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -3369,10 +3327,10 @@ // CHECK26-NEXT: [[TMP33:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 // CHECK26-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP33]]) // CHECK26-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK26-NEXT: [[TMP34:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK26-NEXT: call void @llvm.stackrestore(i8* [[TMP34]]) -// CHECK26-NEXT: [[TMP35:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK26-NEXT: ret i32 [[TMP35]] +// CHECK26-NEXT: [[TMP30:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK26-NEXT: call void @llvm.stackrestore(i8* [[TMP30]]) +// CHECK26-NEXT: [[TMP31:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK26-NEXT: ret i32 [[TMP31]] // // // CHECK26-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l162 @@ -3539,7 +3497,7 @@ // CHECK26-NEXT: [[TMP21:%.*]] = load i32, i32* [[TE]], align 4 // CHECK26-NEXT: [[TMP22:%.*]] = load i32, i32* [[TH]], align 4 // CHECK26-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK26-NEXT: [[TMP23:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l151.region_id, i32 3, i8** [[TMP19]], i8** [[TMP20]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.2, i32 0, i32 0), i8** null, i8** null, i32 [[TMP21]], i32 [[TMP22]]) +// CHECK26-NEXT: [[TMP23:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l151.region_id, i32 3, i8** [[TMP19]], i8** [[TMP20]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.2, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.3, i32 0, i32 0), i8** null, i8** null, i32 [[TMP21]], i32 [[TMP22]]) // CHECK26-NEXT: [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0 // CHECK26-NEXT: br i1 [[TMP24]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK26: omp_offload.failed: @@ -3656,7 +3614,6 @@ // CHECK27-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK27-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK27-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4 -// CHECK27-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 4 // CHECK27-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK27-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK27-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -3680,47 +3637,41 @@ // CHECK27-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK27-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i32* // CHECK27-NEXT: store i32 [[TMP3]], i32* [[TMP9]], align 4 -// CHECK27-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK27-NEXT: store i64 4, i64* [[TMP10]], align 4 -// CHECK27-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 
-// CHECK27-NEXT: store i8* null, i8** [[TMP11]], align 4 -// CHECK27-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK27-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32* -// CHECK27-NEXT: store i32 [[TMP0]], i32* [[TMP13]], align 4 -// CHECK27-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK27-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32* -// CHECK27-NEXT: store i32 [[TMP0]], i32* [[TMP15]], align 4 -// CHECK27-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK27-NEXT: store i64 4, i64* [[TMP16]], align 4 -// CHECK27-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK27-NEXT: store i8* null, i8** [[TMP17]], align 4 -// CHECK27-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK27-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK27-NEXT: store i8* null, i8** [[TMP10]], align 4 +// CHECK27-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK27-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i32* +// CHECK27-NEXT: store i32 [[TMP0]], i32* [[TMP12]], align 4 +// CHECK27-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK27-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i32* +// CHECK27-NEXT: store i32 [[TMP0]], i32* [[TMP14]], align 4 +// CHECK27-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK27-NEXT: store i8* null, i8** [[TMP15]], align 4 +// CHECK27-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK27-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** +// CHECK27-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 4 +// CHECK27-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK27-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** // CHECK27-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 4 -// CHECK27-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK27-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** -// CHECK27-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 4 -// CHECK27-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK27-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 4 -// CHECK27-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK27-NEXT: store i8* null, i8** [[TMP23]], align 4 -// CHECK27-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK27-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK27-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK27-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4 -// CHECK27-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK27-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK27-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 +// CHECK27-NEXT: store 
i64 [[TMP5]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 2), align 4 +// CHECK27-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK27-NEXT: store i8* null, i8** [[TMP20]], align 4 +// CHECK27-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK27-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK27-NEXT: [[TMP23:%.*]] = load i32, i32* [[N]], align 4 +// CHECK27-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK27-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK27-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK27-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK27-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK27-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK27-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK27-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1 -// CHECK27-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 -// CHECK27-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[TMP30]]) -// CHECK27-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l162.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK27-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 -// CHECK27-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK27-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK27-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], 1 +// CHECK27-NEXT: [[TMP26:%.*]] = zext i32 [[ADD]] to i64 +// CHECK27-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[TMP26]]) +// CHECK27-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l162.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK27-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK27-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK27: omp_offload.failed: // CHECK27-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l162(i32 [[TMP3]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK27-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -3728,10 +3679,10 @@ // CHECK27-NEXT: [[TMP33:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 // CHECK27-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP33]]) // CHECK27-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK27-NEXT: [[TMP34:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK27-NEXT: call void @llvm.stackrestore(i8* [[TMP34]]) -// CHECK27-NEXT: [[TMP35:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK27-NEXT: ret i32 [[TMP35]] +// CHECK27-NEXT: [[TMP30:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK27-NEXT: call void @llvm.stackrestore(i8* [[TMP30]]) +// CHECK27-NEXT: 
[[TMP31:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK27-NEXT: ret i32 [[TMP31]] // // // CHECK27-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l162 @@ -3894,7 +3845,7 @@ // CHECK27-NEXT: [[TMP21:%.*]] = load i32, i32* [[TE]], align 4 // CHECK27-NEXT: [[TMP22:%.*]] = load i32, i32* [[TH]], align 4 // CHECK27-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK27-NEXT: [[TMP23:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l151.region_id, i32 3, i8** [[TMP19]], i8** [[TMP20]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.2, i32 0, i32 0), i8** null, i8** null, i32 [[TMP21]], i32 [[TMP22]]) +// CHECK27-NEXT: [[TMP23:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l151.region_id, i32 3, i8** [[TMP19]], i8** [[TMP20]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.2, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.3, i32 0, i32 0), i8** null, i8** null, i32 [[TMP21]], i32 [[TMP22]]) // CHECK27-NEXT: [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0 // CHECK27-NEXT: br i1 [[TMP24]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK27: omp_offload.failed: @@ -4008,7 +3959,6 @@ // CHECK28-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK28-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK28-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4 -// CHECK28-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 4 // CHECK28-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK28-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK28-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -4032,47 +3982,41 @@ // CHECK28-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK28-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i32* // CHECK28-NEXT: store i32 [[TMP3]], i32* [[TMP9]], align 4 -// CHECK28-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK28-NEXT: store i64 4, i64* [[TMP10]], align 4 -// CHECK28-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK28-NEXT: store i8* null, i8** [[TMP11]], align 4 -// CHECK28-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK28-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32* -// CHECK28-NEXT: store i32 [[TMP0]], i32* [[TMP13]], align 4 -// CHECK28-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK28-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32* -// CHECK28-NEXT: store i32 [[TMP0]], i32* [[TMP15]], align 4 -// CHECK28-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK28-NEXT: store i64 4, i64* [[TMP16]], align 4 -// CHECK28-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK28-NEXT: store i8* null, i8** [[TMP17]], align 4 -// CHECK28-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 
0, i32 2 +// CHECK28-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK28-NEXT: store i8* null, i8** [[TMP10]], align 4 +// CHECK28-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK28-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i32* +// CHECK28-NEXT: store i32 [[TMP0]], i32* [[TMP12]], align 4 +// CHECK28-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK28-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i32* +// CHECK28-NEXT: store i32 [[TMP0]], i32* [[TMP14]], align 4 +// CHECK28-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK28-NEXT: store i8* null, i8** [[TMP15]], align 4 +// CHECK28-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK28-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** +// CHECK28-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 4 +// CHECK28-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK28-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** // CHECK28-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 4 -// CHECK28-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK28-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** -// CHECK28-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 4 -// CHECK28-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK28-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 4 -// CHECK28-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK28-NEXT: store i8* null, i8** [[TMP23]], align 4 -// CHECK28-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK28-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK28-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK28-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4 -// CHECK28-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK28-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK28-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 +// CHECK28-NEXT: store i64 [[TMP5]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 2), align 4 +// CHECK28-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK28-NEXT: store i8* null, i8** [[TMP20]], align 4 +// CHECK28-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK28-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK28-NEXT: [[TMP23:%.*]] = load i32, i32* [[N]], align 4 +// CHECK28-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK28-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK28-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK28-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK28-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK28-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK28-NEXT: [[TMP29:%.*]] 
= load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK28-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1 -// CHECK28-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 -// CHECK28-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[TMP30]]) -// CHECK28-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l162.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK28-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 -// CHECK28-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK28-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK28-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], 1 +// CHECK28-NEXT: [[TMP26:%.*]] = zext i32 [[ADD]] to i64 +// CHECK28-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[TMP26]]) +// CHECK28-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l162.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK28-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK28-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK28: omp_offload.failed: // CHECK28-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l162(i32 [[TMP3]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK28-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -4080,10 +4024,10 @@ // CHECK28-NEXT: [[TMP33:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 // CHECK28-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP33]]) // CHECK28-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK28-NEXT: [[TMP34:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK28-NEXT: call void @llvm.stackrestore(i8* [[TMP34]]) -// CHECK28-NEXT: [[TMP35:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK28-NEXT: ret i32 [[TMP35]] +// CHECK28-NEXT: [[TMP30:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK28-NEXT: call void @llvm.stackrestore(i8* [[TMP30]]) +// CHECK28-NEXT: [[TMP31:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK28-NEXT: ret i32 [[TMP31]] // // // CHECK28-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l162 @@ -4246,7 +4190,7 @@ // CHECK28-NEXT: [[TMP21:%.*]] = load i32, i32* [[TE]], align 4 // CHECK28-NEXT: [[TMP22:%.*]] = load i32, i32* [[TH]], align 4 // CHECK28-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK28-NEXT: [[TMP23:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l151.region_id, i32 3, i8** [[TMP19]], i8** [[TMP20]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.2, i32 0, i32 0), i8** null, i8** null, i32 [[TMP21]], i32 [[TMP22]]) +// CHECK28-NEXT: [[TMP23:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, 
i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l151.region_id, i32 3, i8** [[TMP19]], i8** [[TMP20]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.2, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.3, i32 0, i32 0), i8** null, i8** null, i32 [[TMP21]], i32 [[TMP22]]) // CHECK28-NEXT: [[TMP24:%.*]] = icmp ne i32 [[TMP23]], 0 // CHECK28-NEXT: br i1 [[TMP24]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK28: omp_offload.failed: diff --git a/clang/test/OpenMP/teams_distribute_collapse_codegen.cpp b/clang/test/OpenMP/teams_distribute_collapse_codegen.cpp --- a/clang/test/OpenMP/teams_distribute_collapse_codegen.cpp +++ b/clang/test/OpenMP/teams_distribute_collapse_codegen.cpp @@ -29,7 +29,7 @@ #pragma omp teams distribute collapse(2) for(int i = 0; i < X; i++) { for(int j = 0; j < Y; j++) { - a[i][j] = (T)0; + a[i][j] = (T)0; } } @@ -685,7 +685,6 @@ // CHECK9-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 8 // CHECK9-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 8 // CHECK9-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 8 -// CHECK9-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 8 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK9-NEXT: [[_TMP2:%.*]] = alloca i32, align 4 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 @@ -722,74 +721,64 @@ // CHECK9-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK9-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64* // CHECK9-NEXT: store i64 [[TMP7]], i64* [[TMP15]], align 8 -// CHECK9-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK9-NEXT: store i64 4, i64* [[TMP16]], align 8 -// CHECK9-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK9-NEXT: store i8* null, i8** [[TMP17]], align 8 -// CHECK9-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK9-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i64* -// CHECK9-NEXT: store i64 [[TMP9]], i64* [[TMP19]], align 8 -// CHECK9-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK9-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i64* -// CHECK9-NEXT: store i64 [[TMP9]], i64* [[TMP21]], align 8 -// CHECK9-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK9-NEXT: store i64 4, i64* [[TMP22]], align 8 -// CHECK9-NEXT: [[TMP23:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK9-NEXT: store i8* null, i8** [[TMP23]], align 8 -// CHECK9-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK9-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK9-NEXT: store i8* null, i8** [[TMP16]], align 8 +// CHECK9-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK9-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i64* +// CHECK9-NEXT: store i64 [[TMP9]], i64* [[TMP18]], align 8 +// CHECK9-NEXT: [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK9-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i64* +// CHECK9-NEXT: store i64 
[[TMP9]], i64* [[TMP20]], align 8 +// CHECK9-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK9-NEXT: store i8* null, i8** [[TMP21]], align 8 +// CHECK9-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK9-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i64* +// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP23]], align 8 +// CHECK9-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK9-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i64* // CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP25]], align 8 -// CHECK9-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK9-NEXT: [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i64* -// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP27]], align 8 -// CHECK9-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK9-NEXT: store i64 8, i64* [[TMP28]], align 8 -// CHECK9-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK9-NEXT: store i8* null, i8** [[TMP29]], align 8 -// CHECK9-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK9-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i64* -// CHECK9-NEXT: store i64 [[TMP3]], i64* [[TMP31]], align 8 -// CHECK9-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK9-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i64* -// CHECK9-NEXT: store i64 [[TMP3]], i64* [[TMP33]], align 8 -// CHECK9-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK9-NEXT: store i64 8, i64* [[TMP34]], align 8 -// CHECK9-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 -// CHECK9-NEXT: store i8* null, i8** [[TMP35]], align 8 -// CHECK9-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK9-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i32** -// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP37]], align 8 -// CHECK9-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK9-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i32** -// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP39]], align 8 -// CHECK9-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK9-NEXT: store i64 [[TMP11]], i64* [[TMP40]], align 8 -// CHECK9-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 -// CHECK9-NEXT: store i8* null, i8** [[TMP41]], align 8 -// CHECK9-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP43:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP44:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP45:%.*]] = load i32, i32* [[N]], align 4 -// CHECK9-NEXT: store i32 [[TMP45]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK9-NEXT: [[TMP46:%.*]] = load i32, i32* [[M]], align 4 -// CHECK9-NEXT: store i32 [[TMP46]], i32* [[DOTCAPTURE_EXPR_3]], align 4 -// CHECK9-NEXT: [[TMP47:%.*]] = 
load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP47]], 0 +// CHECK9-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK9-NEXT: store i8* null, i8** [[TMP26]], align 8 +// CHECK9-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK9-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i64* +// CHECK9-NEXT: store i64 [[TMP3]], i64* [[TMP28]], align 8 +// CHECK9-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK9-NEXT: [[TMP30:%.*]] = bitcast i8** [[TMP29]] to i64* +// CHECK9-NEXT: store i64 [[TMP3]], i64* [[TMP30]], align 8 +// CHECK9-NEXT: [[TMP31:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 +// CHECK9-NEXT: store i8* null, i8** [[TMP31]], align 8 +// CHECK9-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK9-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i32** +// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP33]], align 8 +// CHECK9-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK9-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i32** +// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP35]], align 8 +// CHECK9-NEXT: store i64 [[TMP11]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes, i32 0, i32 4), align 8 +// CHECK9-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 +// CHECK9-NEXT: store i8* null, i8** [[TMP36]], align 8 +// CHECK9-NEXT: [[TMP37:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP39:%.*]] = load i32, i32* [[N]], align 4 +// CHECK9-NEXT: store i32 [[TMP39]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK9-NEXT: [[TMP40:%.*]] = load i32, i32* [[M]], align 4 +// CHECK9-NEXT: store i32 [[TMP40]], i32* [[DOTCAPTURE_EXPR_3]], align 4 +// CHECK9-NEXT: [[TMP41:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP41]], 0 // CHECK9-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK9-NEXT: [[CONV5:%.*]] = sext i32 [[DIV]] to i64 -// CHECK9-NEXT: [[TMP48:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4 -// CHECK9-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP48]], 0 +// CHECK9-NEXT: [[TMP42:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4 +// CHECK9-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP42]], 0 // CHECK9-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1 // CHECK9-NEXT: [[CONV8:%.*]] = sext i32 [[DIV7]] to i64 // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i64 [[CONV5]], [[CONV8]] // CHECK9-NEXT: [[SUB9:%.*]] = sub nsw i64 [[MUL]], 1 // CHECK9-NEXT: store i64 [[SUB9]], i64* [[DOTCAPTURE_EXPR_4]], align 8 -// CHECK9-NEXT: [[TMP49:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_4]], align 8 -// CHECK9-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP49]], 1 +// CHECK9-NEXT: [[TMP43:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_4]], align 8 +// CHECK9-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP43]], 1 // CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[ADD]]) -// CHECK9-NEXT: [[TMP50:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* 
@.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l82.region_id, i32 5, i8** [[TMP42]], i8** [[TMP43]], i64* [[TMP44]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK9-NEXT: [[TMP51:%.*]] = icmp ne i32 [[TMP50]], 0 -// CHECK9-NEXT: br i1 [[TMP51]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK9-NEXT: [[TMP44:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l82.region_id, i32 5, i8** [[TMP37]], i8** [[TMP38]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK9-NEXT: [[TMP45:%.*]] = icmp ne i32 [[TMP44]], 0 +// CHECK9-NEXT: br i1 [[TMP45]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK9: omp_offload.failed: // CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l82(i64 [[TMP7]], i64 [[TMP9]], i64 [[TMP1]], i64 [[TMP3]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -797,10 +786,10 @@ // CHECK9-NEXT: [[TMP52:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 // CHECK9-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10ELi2EEiT_(i32 noundef signext [[TMP52]]) // CHECK9-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK9-NEXT: [[TMP53:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK9-NEXT: call void @llvm.stackrestore(i8* [[TMP53]]) -// CHECK9-NEXT: [[TMP54:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK9-NEXT: ret i32 [[TMP54]] +// CHECK9-NEXT: [[TMP47:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK9-NEXT: call void @llvm.stackrestore(i8* [[TMP47]]) +// CHECK9-NEXT: [[TMP48:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK9-NEXT: ret i32 [[TMP48]] // // // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l82 @@ -995,7 +984,7 @@ // CHECK9-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK9-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 20) -// CHECK9-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l68.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.2, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK9-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l68.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.2, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.3, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK9-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK9-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK9: omp_offload.failed: @@ -1119,7 +1108,6 @@ // CHECK10-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 8 // 
CHECK10-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 8 // CHECK10-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 8 -// CHECK10-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 8 // CHECK10-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK10-NEXT: [[_TMP2:%.*]] = alloca i32, align 4 // CHECK10-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 @@ -1156,74 +1144,64 @@ // CHECK10-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK10-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64* // CHECK10-NEXT: store i64 [[TMP7]], i64* [[TMP15]], align 8 -// CHECK10-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK10-NEXT: store i64 4, i64* [[TMP16]], align 8 -// CHECK10-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK10-NEXT: store i8* null, i8** [[TMP17]], align 8 -// CHECK10-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK10-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i64* -// CHECK10-NEXT: store i64 [[TMP9]], i64* [[TMP19]], align 8 -// CHECK10-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK10-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i64* -// CHECK10-NEXT: store i64 [[TMP9]], i64* [[TMP21]], align 8 -// CHECK10-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK10-NEXT: store i64 4, i64* [[TMP22]], align 8 -// CHECK10-NEXT: [[TMP23:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK10-NEXT: store i8* null, i8** [[TMP23]], align 8 -// CHECK10-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK10-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK10-NEXT: store i8* null, i8** [[TMP16]], align 8 +// CHECK10-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK10-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i64* +// CHECK10-NEXT: store i64 [[TMP9]], i64* [[TMP18]], align 8 +// CHECK10-NEXT: [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK10-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i64* +// CHECK10-NEXT: store i64 [[TMP9]], i64* [[TMP20]], align 8 +// CHECK10-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK10-NEXT: store i8* null, i8** [[TMP21]], align 8 +// CHECK10-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK10-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i64* +// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP23]], align 8 +// CHECK10-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK10-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i64* // CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP25]], align 8 -// CHECK10-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK10-NEXT: [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i64* -// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP27]], align 8 -// CHECK10-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i64], [5 x 
i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK10-NEXT: store i64 8, i64* [[TMP28]], align 8 -// CHECK10-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK10-NEXT: store i8* null, i8** [[TMP29]], align 8 -// CHECK10-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK10-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i64* -// CHECK10-NEXT: store i64 [[TMP3]], i64* [[TMP31]], align 8 -// CHECK10-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK10-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i64* -// CHECK10-NEXT: store i64 [[TMP3]], i64* [[TMP33]], align 8 -// CHECK10-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK10-NEXT: store i64 8, i64* [[TMP34]], align 8 -// CHECK10-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 -// CHECK10-NEXT: store i8* null, i8** [[TMP35]], align 8 -// CHECK10-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK10-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i32** -// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP37]], align 8 -// CHECK10-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK10-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i32** -// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP39]], align 8 -// CHECK10-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK10-NEXT: store i64 [[TMP11]], i64* [[TMP40]], align 8 -// CHECK10-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 -// CHECK10-NEXT: store i8* null, i8** [[TMP41]], align 8 -// CHECK10-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP43:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP44:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP45:%.*]] = load i32, i32* [[N]], align 4 -// CHECK10-NEXT: store i32 [[TMP45]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK10-NEXT: [[TMP46:%.*]] = load i32, i32* [[M]], align 4 -// CHECK10-NEXT: store i32 [[TMP46]], i32* [[DOTCAPTURE_EXPR_3]], align 4 -// CHECK10-NEXT: [[TMP47:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK10-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP47]], 0 +// CHECK10-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK10-NEXT: store i8* null, i8** [[TMP26]], align 8 +// CHECK10-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK10-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i64* +// CHECK10-NEXT: store i64 [[TMP3]], i64* [[TMP28]], align 8 +// CHECK10-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK10-NEXT: [[TMP30:%.*]] = bitcast i8** [[TMP29]] to i64* +// CHECK10-NEXT: store i64 [[TMP3]], i64* [[TMP30]], align 8 +// CHECK10-NEXT: [[TMP31:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 +// CHECK10-NEXT: store i8* null, i8** [[TMP31]], align 8 +// CHECK10-NEXT: 
[[TMP32:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK10-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i32** +// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP33]], align 8 +// CHECK10-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK10-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i32** +// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP35]], align 8 +// CHECK10-NEXT: store i64 [[TMP11]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes, i32 0, i32 4), align 8 +// CHECK10-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 +// CHECK10-NEXT: store i8* null, i8** [[TMP36]], align 8 +// CHECK10-NEXT: [[TMP37:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP39:%.*]] = load i32, i32* [[N]], align 4 +// CHECK10-NEXT: store i32 [[TMP39]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK10-NEXT: [[TMP40:%.*]] = load i32, i32* [[M]], align 4 +// CHECK10-NEXT: store i32 [[TMP40]], i32* [[DOTCAPTURE_EXPR_3]], align 4 +// CHECK10-NEXT: [[TMP41:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK10-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP41]], 0 // CHECK10-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK10-NEXT: [[CONV5:%.*]] = sext i32 [[DIV]] to i64 -// CHECK10-NEXT: [[TMP48:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4 -// CHECK10-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP48]], 0 +// CHECK10-NEXT: [[TMP42:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4 +// CHECK10-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP42]], 0 // CHECK10-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1 // CHECK10-NEXT: [[CONV8:%.*]] = sext i32 [[DIV7]] to i64 // CHECK10-NEXT: [[MUL:%.*]] = mul nsw i64 [[CONV5]], [[CONV8]] // CHECK10-NEXT: [[SUB9:%.*]] = sub nsw i64 [[MUL]], 1 // CHECK10-NEXT: store i64 [[SUB9]], i64* [[DOTCAPTURE_EXPR_4]], align 8 -// CHECK10-NEXT: [[TMP49:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_4]], align 8 -// CHECK10-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP49]], 1 +// CHECK10-NEXT: [[TMP43:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_4]], align 8 +// CHECK10-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP43]], 1 // CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[ADD]]) -// CHECK10-NEXT: [[TMP50:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l82.region_id, i32 5, i8** [[TMP42]], i8** [[TMP43]], i64* [[TMP44]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK10-NEXT: [[TMP51:%.*]] = icmp ne i32 [[TMP50]], 0 -// CHECK10-NEXT: br i1 [[TMP51]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK10-NEXT: [[TMP44:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l82.region_id, i32 5, i8** [[TMP37]], i8** [[TMP38]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK10-NEXT: [[TMP45:%.*]] = icmp ne i32 [[TMP44]], 0 +// CHECK10-NEXT: br i1 [[TMP45]], label [[OMP_OFFLOAD_FAILED:%.*]], 
label [[OMP_OFFLOAD_CONT:%.*]] // CHECK10: omp_offload.failed: // CHECK10-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l82(i64 [[TMP7]], i64 [[TMP9]], i64 [[TMP1]], i64 [[TMP3]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK10-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -1231,10 +1209,10 @@ // CHECK10-NEXT: [[TMP52:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 // CHECK10-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10ELi2EEiT_(i32 noundef signext [[TMP52]]) // CHECK10-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK10-NEXT: [[TMP53:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK10-NEXT: call void @llvm.stackrestore(i8* [[TMP53]]) -// CHECK10-NEXT: [[TMP54:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK10-NEXT: ret i32 [[TMP54]] +// CHECK10-NEXT: [[TMP47:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK10-NEXT: call void @llvm.stackrestore(i8* [[TMP47]]) +// CHECK10-NEXT: [[TMP48:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK10-NEXT: ret i32 [[TMP48]] // // // CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l82 @@ -1429,7 +1407,7 @@ // CHECK10-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK10-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 20) -// CHECK10-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l68.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.2, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK10-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l68.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.2, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.3, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK10-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK10-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK10: omp_offload.failed: @@ -1553,7 +1531,6 @@ // CHECK11-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 4 // CHECK11-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 4 // CHECK11-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 4 -// CHECK11-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 4 // CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK11-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 @@ -1587,74 +1564,64 @@ // CHECK11-NEXT: [[TMP13:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK11-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i32* // CHECK11-NEXT: store i32 [[TMP5]], i32* [[TMP14]], align 4 -// CHECK11-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK11-NEXT: store i64 4, i64* [[TMP15]], align 4 -// CHECK11-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 
0 -// CHECK11-NEXT: store i8* null, i8** [[TMP16]], align 4 -// CHECK11-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK11-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32* -// CHECK11-NEXT: store i32 [[TMP7]], i32* [[TMP18]], align 4 -// CHECK11-NEXT: [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK11-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i32* -// CHECK11-NEXT: store i32 [[TMP7]], i32* [[TMP20]], align 4 -// CHECK11-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK11-NEXT: store i64 4, i64* [[TMP21]], align 4 -// CHECK11-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK11-NEXT: store i8* null, i8** [[TMP22]], align 4 -// CHECK11-NEXT: [[TMP23:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK11-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK11-NEXT: store i8* null, i8** [[TMP15]], align 4 +// CHECK11-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK11-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32* +// CHECK11-NEXT: store i32 [[TMP7]], i32* [[TMP17]], align 4 +// CHECK11-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK11-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32* +// CHECK11-NEXT: store i32 [[TMP7]], i32* [[TMP19]], align 4 +// CHECK11-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK11-NEXT: store i8* null, i8** [[TMP20]], align 4 +// CHECK11-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK11-NEXT: [[TMP22:%.*]] = bitcast i8** [[TMP21]] to i32* +// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP22]], align 4 +// CHECK11-NEXT: [[TMP23:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK11-NEXT: [[TMP24:%.*]] = bitcast i8** [[TMP23]] to i32* // CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP24]], align 4 -// CHECK11-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK11-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i32* -// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP26]], align 4 -// CHECK11-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK11-NEXT: store i64 4, i64* [[TMP27]], align 4 -// CHECK11-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK11-NEXT: store i8* null, i8** [[TMP28]], align 4 -// CHECK11-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK11-NEXT: [[TMP30:%.*]] = bitcast i8** [[TMP29]] to i32* -// CHECK11-NEXT: store i32 [[TMP1]], i32* [[TMP30]], align 4 -// CHECK11-NEXT: [[TMP31:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK11-NEXT: [[TMP32:%.*]] = bitcast i8** [[TMP31]] to i32* -// CHECK11-NEXT: store i32 [[TMP1]], i32* [[TMP32]], align 4 -// CHECK11-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK11-NEXT: store i64 4, i64* [[TMP33]], align 4 
-// CHECK11-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 -// CHECK11-NEXT: store i8* null, i8** [[TMP34]], align 4 -// CHECK11-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK11-NEXT: [[TMP36:%.*]] = bitcast i8** [[TMP35]] to i32** -// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP36]], align 4 -// CHECK11-NEXT: [[TMP37:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK11-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i32** -// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP38]], align 4 -// CHECK11-NEXT: [[TMP39:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK11-NEXT: store i64 [[TMP10]], i64* [[TMP39]], align 4 -// CHECK11-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 -// CHECK11-NEXT: store i8* null, i8** [[TMP40]], align 4 -// CHECK11-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP43:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP44:%.*]] = load i32, i32* [[N]], align 4 -// CHECK11-NEXT: store i32 [[TMP44]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK11-NEXT: [[TMP45:%.*]] = load i32, i32* [[M]], align 4 -// CHECK11-NEXT: store i32 [[TMP45]], i32* [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK11-NEXT: [[TMP46:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP46]], 0 +// CHECK11-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK11-NEXT: store i8* null, i8** [[TMP25]], align 4 +// CHECK11-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK11-NEXT: [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i32* +// CHECK11-NEXT: store i32 [[TMP1]], i32* [[TMP27]], align 4 +// CHECK11-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK11-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i32* +// CHECK11-NEXT: store i32 [[TMP1]], i32* [[TMP29]], align 4 +// CHECK11-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 +// CHECK11-NEXT: store i8* null, i8** [[TMP30]], align 4 +// CHECK11-NEXT: [[TMP31:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK11-NEXT: [[TMP32:%.*]] = bitcast i8** [[TMP31]] to i32** +// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP32]], align 4 +// CHECK11-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK11-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i32** +// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP34]], align 4 +// CHECK11-NEXT: store i64 [[TMP10]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes, i32 0, i32 4), align 4 +// CHECK11-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 +// CHECK11-NEXT: store i8* null, i8** [[TMP35]], align 4 +// CHECK11-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP37:%.*]] = 
getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP38:%.*]] = load i32, i32* [[N]], align 4 +// CHECK11-NEXT: store i32 [[TMP38]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK11-NEXT: [[TMP39:%.*]] = load i32, i32* [[M]], align 4 +// CHECK11-NEXT: store i32 [[TMP39]], i32* [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK11-NEXT: [[TMP40:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP40]], 0 // CHECK11-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK11-NEXT: [[CONV:%.*]] = sext i32 [[DIV]] to i64 -// CHECK11-NEXT: [[TMP47:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK11-NEXT: [[SUB4:%.*]] = sub nsw i32 [[TMP47]], 0 +// CHECK11-NEXT: [[TMP41:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK11-NEXT: [[SUB4:%.*]] = sub nsw i32 [[TMP41]], 0 // CHECK11-NEXT: [[DIV5:%.*]] = sdiv i32 [[SUB4]], 1 // CHECK11-NEXT: [[CONV6:%.*]] = sext i32 [[DIV5]] to i64 // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i64 [[CONV]], [[CONV6]] // CHECK11-NEXT: [[SUB7:%.*]] = sub nsw i64 [[MUL]], 1 // CHECK11-NEXT: store i64 [[SUB7]], i64* [[DOTCAPTURE_EXPR_3]], align 8 -// CHECK11-NEXT: [[TMP48:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8 -// CHECK11-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP48]], 1 +// CHECK11-NEXT: [[TMP42:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8 +// CHECK11-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP42]], 1 // CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[ADD]]) -// CHECK11-NEXT: [[TMP49:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l82.region_id, i32 5, i8** [[TMP41]], i8** [[TMP42]], i64* [[TMP43]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK11-NEXT: [[TMP50:%.*]] = icmp ne i32 [[TMP49]], 0 -// CHECK11-NEXT: br i1 [[TMP50]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK11-NEXT: [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l82.region_id, i32 5, i8** [[TMP36]], i8** [[TMP37]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK11-NEXT: [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0 +// CHECK11-NEXT: br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK11: omp_offload.failed: // CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l82(i32 [[TMP5]], i32 [[TMP7]], i32 [[TMP0]], i32 [[TMP1]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -1662,10 +1629,10 @@ // CHECK11-NEXT: [[TMP51:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 // CHECK11-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10ELi2EEiT_(i32 noundef [[TMP51]]) // CHECK11-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK11-NEXT: [[TMP52:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK11-NEXT: call void @llvm.stackrestore(i8* [[TMP52]]) -// CHECK11-NEXT: [[TMP53:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK11-NEXT: ret i32 [[TMP53]] +// CHECK11-NEXT: [[TMP46:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK11-NEXT: call void @llvm.stackrestore(i8* [[TMP46]]) +// CHECK11-NEXT: 
[[TMP47:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK11-NEXT: ret i32 [[TMP47]] // // // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l82 @@ -1856,7 +1823,7 @@ // CHECK11-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK11-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 20) -// CHECK11-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l68.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.2, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK11-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l68.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.2, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.3, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK11-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK11-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK11: omp_offload.failed: @@ -1978,7 +1945,6 @@ // CHECK12-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 4 // CHECK12-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 4 // CHECK12-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 4 -// CHECK12-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 4 // CHECK12-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK12-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 // CHECK12-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 @@ -2012,74 +1978,64 @@ // CHECK12-NEXT: [[TMP13:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK12-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i32* // CHECK12-NEXT: store i32 [[TMP5]], i32* [[TMP14]], align 4 -// CHECK12-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK12-NEXT: store i64 4, i64* [[TMP15]], align 4 -// CHECK12-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK12-NEXT: store i8* null, i8** [[TMP16]], align 4 -// CHECK12-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK12-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32* -// CHECK12-NEXT: store i32 [[TMP7]], i32* [[TMP18]], align 4 -// CHECK12-NEXT: [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK12-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i32* -// CHECK12-NEXT: store i32 [[TMP7]], i32* [[TMP20]], align 4 -// CHECK12-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK12-NEXT: store i64 4, i64* [[TMP21]], align 4 -// CHECK12-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK12-NEXT: store i8* null, i8** [[TMP22]], align 4 -// CHECK12-NEXT: [[TMP23:%.*]] = getelementptr 
inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK12-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK12-NEXT: store i8* null, i8** [[TMP15]], align 4 +// CHECK12-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK12-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32* +// CHECK12-NEXT: store i32 [[TMP7]], i32* [[TMP17]], align 4 +// CHECK12-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK12-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32* +// CHECK12-NEXT: store i32 [[TMP7]], i32* [[TMP19]], align 4 +// CHECK12-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK12-NEXT: store i8* null, i8** [[TMP20]], align 4 +// CHECK12-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK12-NEXT: [[TMP22:%.*]] = bitcast i8** [[TMP21]] to i32* +// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP22]], align 4 +// CHECK12-NEXT: [[TMP23:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK12-NEXT: [[TMP24:%.*]] = bitcast i8** [[TMP23]] to i32* // CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP24]], align 4 -// CHECK12-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK12-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i32* -// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP26]], align 4 -// CHECK12-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK12-NEXT: store i64 4, i64* [[TMP27]], align 4 -// CHECK12-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK12-NEXT: store i8* null, i8** [[TMP28]], align 4 -// CHECK12-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK12-NEXT: [[TMP30:%.*]] = bitcast i8** [[TMP29]] to i32* -// CHECK12-NEXT: store i32 [[TMP1]], i32* [[TMP30]], align 4 -// CHECK12-NEXT: [[TMP31:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK12-NEXT: [[TMP32:%.*]] = bitcast i8** [[TMP31]] to i32* -// CHECK12-NEXT: store i32 [[TMP1]], i32* [[TMP32]], align 4 -// CHECK12-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK12-NEXT: store i64 4, i64* [[TMP33]], align 4 -// CHECK12-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 -// CHECK12-NEXT: store i8* null, i8** [[TMP34]], align 4 -// CHECK12-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK12-NEXT: [[TMP36:%.*]] = bitcast i8** [[TMP35]] to i32** -// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP36]], align 4 -// CHECK12-NEXT: [[TMP37:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK12-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i32** -// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP38]], align 4 -// CHECK12-NEXT: [[TMP39:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK12-NEXT: store i64 [[TMP10]], i64* [[TMP39]], align 4 -// CHECK12-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* 
[[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 -// CHECK12-NEXT: store i8* null, i8** [[TMP40]], align 4 -// CHECK12-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP43:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP44:%.*]] = load i32, i32* [[N]], align 4 -// CHECK12-NEXT: store i32 [[TMP44]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK12-NEXT: [[TMP45:%.*]] = load i32, i32* [[M]], align 4 -// CHECK12-NEXT: store i32 [[TMP45]], i32* [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK12-NEXT: [[TMP46:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK12-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP46]], 0 +// CHECK12-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK12-NEXT: store i8* null, i8** [[TMP25]], align 4 +// CHECK12-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK12-NEXT: [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i32* +// CHECK12-NEXT: store i32 [[TMP1]], i32* [[TMP27]], align 4 +// CHECK12-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK12-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i32* +// CHECK12-NEXT: store i32 [[TMP1]], i32* [[TMP29]], align 4 +// CHECK12-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 +// CHECK12-NEXT: store i8* null, i8** [[TMP30]], align 4 +// CHECK12-NEXT: [[TMP31:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK12-NEXT: [[TMP32:%.*]] = bitcast i8** [[TMP31]] to i32** +// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP32]], align 4 +// CHECK12-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK12-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i32** +// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP34]], align 4 +// CHECK12-NEXT: store i64 [[TMP10]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes, i32 0, i32 4), align 4 +// CHECK12-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 +// CHECK12-NEXT: store i8* null, i8** [[TMP35]], align 4 +// CHECK12-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP37:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP38:%.*]] = load i32, i32* [[N]], align 4 +// CHECK12-NEXT: store i32 [[TMP38]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK12-NEXT: [[TMP39:%.*]] = load i32, i32* [[M]], align 4 +// CHECK12-NEXT: store i32 [[TMP39]], i32* [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK12-NEXT: [[TMP40:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK12-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP40]], 0 // CHECK12-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK12-NEXT: [[CONV:%.*]] = sext i32 [[DIV]] to i64 -// CHECK12-NEXT: [[TMP47:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK12-NEXT: [[SUB4:%.*]] = sub nsw i32 [[TMP47]], 0 +// CHECK12-NEXT: [[TMP41:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK12-NEXT: [[SUB4:%.*]] = sub nsw i32 [[TMP41]], 0 // CHECK12-NEXT: [[DIV5:%.*]] = sdiv i32 [[SUB4]], 1 
// CHECK12-NEXT: [[CONV6:%.*]] = sext i32 [[DIV5]] to i64 // CHECK12-NEXT: [[MUL:%.*]] = mul nsw i64 [[CONV]], [[CONV6]] // CHECK12-NEXT: [[SUB7:%.*]] = sub nsw i64 [[MUL]], 1 // CHECK12-NEXT: store i64 [[SUB7]], i64* [[DOTCAPTURE_EXPR_3]], align 8 -// CHECK12-NEXT: [[TMP48:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8 -// CHECK12-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP48]], 1 +// CHECK12-NEXT: [[TMP42:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8 +// CHECK12-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP42]], 1 // CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[ADD]]) -// CHECK12-NEXT: [[TMP49:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l82.region_id, i32 5, i8** [[TMP41]], i8** [[TMP42]], i64* [[TMP43]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK12-NEXT: [[TMP50:%.*]] = icmp ne i32 [[TMP49]], 0 -// CHECK12-NEXT: br i1 [[TMP50]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK12-NEXT: [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l82.region_id, i32 5, i8** [[TMP36]], i8** [[TMP37]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK12-NEXT: [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0 +// CHECK12-NEXT: br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK12: omp_offload.failed: // CHECK12-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l82(i32 [[TMP5]], i32 [[TMP7]], i32 [[TMP0]], i32 [[TMP1]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK12-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -2087,10 +2043,10 @@ // CHECK12-NEXT: [[TMP51:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 // CHECK12-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10ELi2EEiT_(i32 noundef [[TMP51]]) // CHECK12-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK12-NEXT: [[TMP52:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK12-NEXT: call void @llvm.stackrestore(i8* [[TMP52]]) -// CHECK12-NEXT: [[TMP53:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK12-NEXT: ret i32 [[TMP53]] +// CHECK12-NEXT: [[TMP46:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK12-NEXT: call void @llvm.stackrestore(i8* [[TMP46]]) +// CHECK12-NEXT: [[TMP47:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK12-NEXT: ret i32 [[TMP47]] // // // CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l82 @@ -2281,7 +2237,7 @@ // CHECK12-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK12-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 20) -// CHECK12-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l68.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.2, i32 0, 
i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK12-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l68.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.2, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.3, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK12-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK12-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK12: omp_offload.failed: diff --git a/clang/test/OpenMP/teams_distribute_dist_schedule_codegen.cpp b/clang/test/OpenMP/teams_distribute_dist_schedule_codegen.cpp --- a/clang/test/OpenMP/teams_distribute_dist_schedule_codegen.cpp +++ b/clang/test/OpenMP/teams_distribute_dist_schedule_codegen.cpp @@ -1526,7 +1526,6 @@ // CHECK9-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK9-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK9-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8 -// CHECK9-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 8 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -1534,18 +1533,16 @@ // CHECK9-NEXT: [[DOTOFFLOAD_BASEPTRS5:%.*]] = alloca [3 x i8*], align 8 // CHECK9-NEXT: [[DOTOFFLOAD_PTRS6:%.*]] = alloca [3 x i8*], align 8 // CHECK9-NEXT: [[DOTOFFLOAD_MAPPERS7:%.*]] = alloca [3 x i8*], align 8 -// CHECK9-NEXT: [[DOTOFFLOAD_SIZES8:%.*]] = alloca [3 x i64], align 8 -// CHECK9-NEXT: [[_TMP9:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[_TMP8:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTCAPTURE_EXPR_11:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[N_CASTED18:%.*]] = alloca i64, align 8 -// CHECK9-NEXT: [[DOTOFFLOAD_BASEPTRS20:%.*]] = alloca [3 x i8*], align 8 -// CHECK9-NEXT: [[DOTOFFLOAD_PTRS21:%.*]] = alloca [3 x i8*], align 8 -// CHECK9-NEXT: [[DOTOFFLOAD_MAPPERS22:%.*]] = alloca [3 x i8*], align 8 -// CHECK9-NEXT: [[DOTOFFLOAD_SIZES23:%.*]] = alloca [3 x i64], align 8 -// CHECK9-NEXT: [[_TMP24:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTCAPTURE_EXPR_25:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTCAPTURE_EXPR_26:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[N_CASTED17:%.*]] = alloca i64, align 8 +// CHECK9-NEXT: [[DOTOFFLOAD_BASEPTRS19:%.*]] = alloca [3 x i8*], align 8 +// CHECK9-NEXT: [[DOTOFFLOAD_PTRS20:%.*]] = alloca [3 x i8*], align 8 +// CHECK9-NEXT: [[DOTOFFLOAD_MAPPERS21:%.*]] = alloca [3 x i8*], align 8 +// CHECK9-NEXT: [[_TMP22:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTCAPTURE_EXPR_23:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTCAPTURE_EXPR_24:%.*]] = alloca i32, align 4 // CHECK9-NEXT: store i32 0, i32* [[RETVAL]], align 4 // CHECK9-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 // CHECK9-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 8 @@ -1567,170 +1564,152 @@ // CHECK9-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK9-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i64* // CHECK9-NEXT: store i64 [[TMP4]], i64* [[TMP9]], align 8 -// CHECK9-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* 
[[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK9-NEXT: store i64 4, i64* [[TMP10]], align 8 -// CHECK9-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK9-NEXT: store i8* null, i8** [[TMP11]], align 8 -// CHECK9-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK9-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64* -// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP13]], align 8 -// CHECK9-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK9-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64* -// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP15]], align 8 -// CHECK9-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK9-NEXT: store i64 8, i64* [[TMP16]], align 8 -// CHECK9-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK9-NEXT: store i8* null, i8** [[TMP17]], align 8 -// CHECK9-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK9-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK9-NEXT: store i8* null, i8** [[TMP10]], align 8 +// CHECK9-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK9-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i64* +// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP12]], align 8 +// CHECK9-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK9-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i64* +// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP14]], align 8 +// CHECK9-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK9-NEXT: store i8* null, i8** [[TMP15]], align 8 +// CHECK9-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK9-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** +// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 8 +// CHECK9-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK9-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** // CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 8 -// CHECK9-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK9-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** -// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 8 -// CHECK9-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK9-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 8 -// CHECK9-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK9-NEXT: store i8* null, i8** [[TMP23]], align 8 -// CHECK9-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4 -// CHECK9-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], 
align 4 -// CHECK9-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 +// CHECK9-NEXT: store i64 [[TMP5]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 2), align 8 +// CHECK9-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK9-NEXT: store i8* null, i8** [[TMP20]], align 8 +// CHECK9-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP23:%.*]] = load i32, i32* [[N]], align 4 +// CHECK9-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK9-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK9-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK9-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK9-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK9-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1 -// CHECK9-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 -// CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[TMP30]]) -// CHECK9-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l100.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK9-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 -// CHECK9-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK9-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], 1 +// CHECK9-NEXT: [[TMP26:%.*]] = zext i32 [[ADD]] to i64 +// CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[TMP26]]) +// CHECK9-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l100.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK9-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK9-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK9: omp_offload.failed: // CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l100(i64 [[TMP4]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK9: omp_offload.cont: -// CHECK9-NEXT: [[TMP33:%.*]] = load i32, i32* [[N]], align 4 +// CHECK9-NEXT: [[TMP29:%.*]] = load i32, i32* [[N]], align 4 // CHECK9-NEXT: [[CONV4:%.*]] = bitcast i64* [[N_CASTED3]] to i32* -// CHECK9-NEXT: store i32 [[TMP33]], i32* [[CONV4]], align 4 -// CHECK9-NEXT: [[TMP34:%.*]] = load i64, i64* [[N_CASTED3]], align 8 -// CHECK9-NEXT: [[TMP35:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK9-NEXT: [[TMP36:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* 
[[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i64* -// CHECK9-NEXT: store i64 [[TMP34]], i64* [[TMP37]], align 8 -// CHECK9-NEXT: [[TMP38:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i64* -// CHECK9-NEXT: store i64 [[TMP34]], i64* [[TMP39]], align 8 -// CHECK9-NEXT: [[TMP40:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 0 -// CHECK9-NEXT: store i64 4, i64* [[TMP40]], align 8 -// CHECK9-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 0 +// CHECK9-NEXT: store i32 [[TMP29]], i32* [[CONV4]], align 4 +// CHECK9-NEXT: [[TMP30:%.*]] = load i64, i64* [[N_CASTED3]], align 8 +// CHECK9-NEXT: [[TMP31:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK9-NEXT: [[TMP32:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i64* +// CHECK9-NEXT: store i64 [[TMP30]], i64* [[TMP33]], align 8 +// CHECK9-NEXT: [[TMP34:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i64* +// CHECK9-NEXT: store i64 [[TMP30]], i64* [[TMP35]], align 8 +// CHECK9-NEXT: [[TMP36:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 0 +// CHECK9-NEXT: store i8* null, i8** [[TMP36]], align 8 +// CHECK9-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1 +// CHECK9-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i64* +// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP38]], align 8 +// CHECK9-NEXT: [[TMP39:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 1 +// CHECK9-NEXT: [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i64* +// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP40]], align 8 +// CHECK9-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 1 // CHECK9-NEXT: store i8* null, i8** [[TMP41]], align 8 -// CHECK9-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1 -// CHECK9-NEXT: [[TMP43:%.*]] = bitcast i8** [[TMP42]] to i64* -// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP43]], align 8 -// CHECK9-NEXT: [[TMP44:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 1 -// CHECK9-NEXT: [[TMP45:%.*]] = bitcast i8** [[TMP44]] to i64* -// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP45]], align 8 -// CHECK9-NEXT: [[TMP46:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 1 -// CHECK9-NEXT: store i64 8, i64* [[TMP46]], align 8 -// CHECK9-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 1 -// CHECK9-NEXT: store i8* null, i8** [[TMP47]], align 8 -// CHECK9-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 2 -// CHECK9-NEXT: [[TMP49:%.*]] = bitcast i8** [[TMP48]] to i32** -// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP49]], align 8 -// CHECK9-NEXT: [[TMP50:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 2 -// CHECK9-NEXT: [[TMP51:%.*]] = bitcast i8** [[TMP50]] to i32** -// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP51]], align 8 -// CHECK9-NEXT: [[TMP52:%.*]] = getelementptr inbounds [3 x 
i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 2 -// CHECK9-NEXT: store i64 [[TMP35]], i64* [[TMP52]], align 8 -// CHECK9-NEXT: [[TMP53:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 2 -// CHECK9-NEXT: store i8* null, i8** [[TMP53]], align 8 -// CHECK9-NEXT: [[TMP54:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP55:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP56:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP57:%.*]] = load i32, i32* [[N]], align 4 -// CHECK9-NEXT: store i32 [[TMP57]], i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK9-NEXT: [[TMP58:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK9-NEXT: [[SUB12:%.*]] = sub nsw i32 [[TMP58]], 0 -// CHECK9-NEXT: [[DIV13:%.*]] = sdiv i32 [[SUB12]], 1 -// CHECK9-NEXT: [[SUB14:%.*]] = sub nsw i32 [[DIV13]], 1 -// CHECK9-NEXT: store i32 [[SUB14]], i32* [[DOTCAPTURE_EXPR_11]], align 4 -// CHECK9-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_11]], align 4 -// CHECK9-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP59]], 1 -// CHECK9-NEXT: [[TMP60:%.*]] = zext i32 [[ADD15]] to i64 -// CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP60]]) -// CHECK9-NEXT: [[TMP61:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l105.region_id, i32 3, i8** [[TMP54]], i8** [[TMP55]], i64* [[TMP56]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.2, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK9-NEXT: [[TMP62:%.*]] = icmp ne i32 [[TMP61]], 0 -// CHECK9-NEXT: br i1 [[TMP62]], label [[OMP_OFFLOAD_FAILED16:%.*]], label [[OMP_OFFLOAD_CONT17:%.*]] -// CHECK9: omp_offload.failed16: -// CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l105(i64 [[TMP34]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] -// CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT17]] -// CHECK9: omp_offload.cont17: -// CHECK9-NEXT: [[TMP63:%.*]] = load i32, i32* [[N]], align 4 -// CHECK9-NEXT: [[CONV19:%.*]] = bitcast i64* [[N_CASTED18]] to i32* -// CHECK9-NEXT: store i32 [[TMP63]], i32* [[CONV19]], align 4 -// CHECK9-NEXT: [[TMP64:%.*]] = load i64, i64* [[N_CASTED18]], align 8 -// CHECK9-NEXT: [[TMP65:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK9-NEXT: [[TMP66:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP67:%.*]] = bitcast i8** [[TMP66]] to i64* -// CHECK9-NEXT: store i64 [[TMP64]], i64* [[TMP67]], align 8 -// CHECK9-NEXT: [[TMP68:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP69:%.*]] = bitcast i8** [[TMP68]] to i64* -// CHECK9-NEXT: store i64 [[TMP64]], i64* [[TMP69]], align 8 -// CHECK9-NEXT: [[TMP70:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES23]], i32 0, i32 0 -// CHECK9-NEXT: store i64 4, i64* [[TMP70]], align 8 -// CHECK9-NEXT: [[TMP71:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 0 -// CHECK9-NEXT: store i8* null, i8** [[TMP71]], align 8 -// CHECK9-NEXT: [[TMP72:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 1 -// CHECK9-NEXT: [[TMP73:%.*]] = bitcast i8** [[TMP72]] to i64* -// CHECK9-NEXT: store i64 [[TMP1]], i64* 
[[TMP73]], align 8 -// CHECK9-NEXT: [[TMP74:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 1 -// CHECK9-NEXT: [[TMP75:%.*]] = bitcast i8** [[TMP74]] to i64* -// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP75]], align 8 -// CHECK9-NEXT: [[TMP76:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES23]], i32 0, i32 1 -// CHECK9-NEXT: store i64 8, i64* [[TMP76]], align 8 -// CHECK9-NEXT: [[TMP77:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 1 -// CHECK9-NEXT: store i8* null, i8** [[TMP77]], align 8 -// CHECK9-NEXT: [[TMP78:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 2 -// CHECK9-NEXT: [[TMP79:%.*]] = bitcast i8** [[TMP78]] to i32** -// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP79]], align 8 -// CHECK9-NEXT: [[TMP80:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 2 -// CHECK9-NEXT: [[TMP81:%.*]] = bitcast i8** [[TMP80]] to i32** -// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP81]], align 8 -// CHECK9-NEXT: [[TMP82:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES23]], i32 0, i32 2 -// CHECK9-NEXT: store i64 [[TMP65]], i64* [[TMP82]], align 8 -// CHECK9-NEXT: [[TMP83:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 2 -// CHECK9-NEXT: store i8* null, i8** [[TMP83]], align 8 -// CHECK9-NEXT: [[TMP84:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP85:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP86:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES23]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP87:%.*]] = load i32, i32* [[N]], align 4 -// CHECK9-NEXT: store i32 [[TMP87]], i32* [[DOTCAPTURE_EXPR_25]], align 4 -// CHECK9-NEXT: [[TMP88:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_25]], align 4 -// CHECK9-NEXT: [[SUB27:%.*]] = sub nsw i32 [[TMP88]], 0 -// CHECK9-NEXT: [[DIV28:%.*]] = sdiv i32 [[SUB27]], 1 -// CHECK9-NEXT: [[SUB29:%.*]] = sub nsw i32 [[DIV28]], 1 -// CHECK9-NEXT: store i32 [[SUB29]], i32* [[DOTCAPTURE_EXPR_26]], align 4 -// CHECK9-NEXT: [[TMP89:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_26]], align 4 -// CHECK9-NEXT: [[ADD30:%.*]] = add nsw i32 [[TMP89]], 1 -// CHECK9-NEXT: [[TMP90:%.*]] = zext i32 [[ADD30]] to i64 -// CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP90]]) -// CHECK9-NEXT: [[TMP91:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l110.region_id, i32 3, i8** [[TMP84]], i8** [[TMP85]], i64* [[TMP86]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK9-NEXT: [[TMP92:%.*]] = icmp ne i32 [[TMP91]], 0 -// CHECK9-NEXT: br i1 [[TMP92]], label [[OMP_OFFLOAD_FAILED31:%.*]], label [[OMP_OFFLOAD_CONT32:%.*]] -// CHECK9: omp_offload.failed31: -// CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l110(i64 [[TMP64]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] -// CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT32]] -// CHECK9: omp_offload.cont32: -// CHECK9-NEXT: [[TMP93:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 -// CHECK9-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP93]]) +// CHECK9-NEXT: [[TMP42:%.*]] = getelementptr 
inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 2 +// CHECK9-NEXT: [[TMP43:%.*]] = bitcast i8** [[TMP42]] to i32** +// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP43]], align 8 +// CHECK9-NEXT: [[TMP44:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 2 +// CHECK9-NEXT: [[TMP45:%.*]] = bitcast i8** [[TMP44]] to i32** +// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP45]], align 8 +// CHECK9-NEXT: store i64 [[TMP31]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.2, i32 0, i32 2), align 8 +// CHECK9-NEXT: [[TMP46:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 2 +// CHECK9-NEXT: store i8* null, i8** [[TMP46]], align 8 +// CHECK9-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP49:%.*]] = load i32, i32* [[N]], align 4 +// CHECK9-NEXT: store i32 [[TMP49]], i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK9-NEXT: [[TMP50:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK9-NEXT: [[SUB11:%.*]] = sub nsw i32 [[TMP50]], 0 +// CHECK9-NEXT: [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1 +// CHECK9-NEXT: [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1 +// CHECK9-NEXT: store i32 [[SUB13]], i32* [[DOTCAPTURE_EXPR_10]], align 4 +// CHECK9-NEXT: [[TMP51:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 +// CHECK9-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP51]], 1 +// CHECK9-NEXT: [[TMP52:%.*]] = zext i32 [[ADD14]] to i64 +// CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP52]]) +// CHECK9-NEXT: [[TMP53:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l105.region_id, i32 3, i8** [[TMP47]], i8** [[TMP48]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.2, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.3, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK9-NEXT: [[TMP54:%.*]] = icmp ne i32 [[TMP53]], 0 +// CHECK9-NEXT: br i1 [[TMP54]], label [[OMP_OFFLOAD_FAILED15:%.*]], label [[OMP_OFFLOAD_CONT16:%.*]] +// CHECK9: omp_offload.failed15: +// CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l105(i64 [[TMP30]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] +// CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT16]] +// CHECK9: omp_offload.cont16: +// CHECK9-NEXT: [[TMP55:%.*]] = load i32, i32* [[N]], align 4 +// CHECK9-NEXT: [[CONV18:%.*]] = bitcast i64* [[N_CASTED17]] to i32* +// CHECK9-NEXT: store i32 [[TMP55]], i32* [[CONV18]], align 4 +// CHECK9-NEXT: [[TMP56:%.*]] = load i64, i64* [[N_CASTED17]], align 8 +// CHECK9-NEXT: [[TMP57:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK9-NEXT: [[TMP58:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP59:%.*]] = bitcast i8** [[TMP58]] to i64* +// CHECK9-NEXT: store i64 [[TMP56]], i64* [[TMP59]], align 8 +// CHECK9-NEXT: [[TMP60:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP61:%.*]] = bitcast i8** [[TMP60]] to i64* +// CHECK9-NEXT: store i64 [[TMP56]], i64* [[TMP61]], align 8 +// CHECK9-NEXT: [[TMP62:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i64 0, i64 0 +// CHECK9-NEXT: store i8* null, i8** [[TMP62]], 
align 8 +// CHECK9-NEXT: [[TMP63:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 1 +// CHECK9-NEXT: [[TMP64:%.*]] = bitcast i8** [[TMP63]] to i64* +// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP64]], align 8 +// CHECK9-NEXT: [[TMP65:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 1 +// CHECK9-NEXT: [[TMP66:%.*]] = bitcast i8** [[TMP65]] to i64* +// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP66]], align 8 +// CHECK9-NEXT: [[TMP67:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i64 0, i64 1 +// CHECK9-NEXT: store i8* null, i8** [[TMP67]], align 8 +// CHECK9-NEXT: [[TMP68:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 2 +// CHECK9-NEXT: [[TMP69:%.*]] = bitcast i8** [[TMP68]] to i32** +// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP69]], align 8 +// CHECK9-NEXT: [[TMP70:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 2 +// CHECK9-NEXT: [[TMP71:%.*]] = bitcast i8** [[TMP70]] to i32** +// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP71]], align 8 +// CHECK9-NEXT: store i64 [[TMP57]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.5, i32 0, i32 2), align 8 +// CHECK9-NEXT: [[TMP72:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i64 0, i64 2 +// CHECK9-NEXT: store i8* null, i8** [[TMP72]], align 8 +// CHECK9-NEXT: [[TMP73:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP74:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP75:%.*]] = load i32, i32* [[N]], align 4 +// CHECK9-NEXT: store i32 [[TMP75]], i32* [[DOTCAPTURE_EXPR_23]], align 4 +// CHECK9-NEXT: [[TMP76:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_23]], align 4 +// CHECK9-NEXT: [[SUB25:%.*]] = sub nsw i32 [[TMP76]], 0 +// CHECK9-NEXT: [[DIV26:%.*]] = sdiv i32 [[SUB25]], 1 +// CHECK9-NEXT: [[SUB27:%.*]] = sub nsw i32 [[DIV26]], 1 +// CHECK9-NEXT: store i32 [[SUB27]], i32* [[DOTCAPTURE_EXPR_24]], align 4 +// CHECK9-NEXT: [[TMP77:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_24]], align 4 +// CHECK9-NEXT: [[ADD28:%.*]] = add nsw i32 [[TMP77]], 1 +// CHECK9-NEXT: [[TMP78:%.*]] = zext i32 [[ADD28]] to i64 +// CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP78]]) +// CHECK9-NEXT: [[TMP79:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l110.region_id, i32 3, i8** [[TMP73]], i8** [[TMP74]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.5, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.6, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK9-NEXT: [[TMP80:%.*]] = icmp ne i32 [[TMP79]], 0 +// CHECK9-NEXT: br i1 [[TMP80]], label [[OMP_OFFLOAD_FAILED29:%.*]], label [[OMP_OFFLOAD_CONT30:%.*]] +// CHECK9: omp_offload.failed29: +// CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l110(i64 [[TMP56]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] +// CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT30]] +// CHECK9: omp_offload.cont30: +// CHECK9-NEXT: [[TMP81:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 +// CHECK9-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP81]]) // CHECK9-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK9-NEXT: 
[[TMP94:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK9-NEXT: call void @llvm.stackrestore(i8* [[TMP94]]) -// CHECK9-NEXT: [[TMP95:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK9-NEXT: ret i32 [[TMP95]] +// CHECK9-NEXT: [[TMP82:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK9-NEXT: call void @llvm.stackrestore(i8* [[TMP82]]) +// CHECK9-NEXT: [[TMP83:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK9-NEXT: ret i32 [[TMP83]] // // // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l100 @@ -1975,11 +1954,11 @@ // CHECK9-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK9-NEXT: store i32 [[TMP3]], i32* [[CONV1]], align 4 // CHECK9-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i64, i32*, i64)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32* [[CONV]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP4]]) +// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i64, i32*, i64)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i32* [[CONV]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP4]]) // CHECK9-NEXT: ret void // // -// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..3 +// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..4 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK9-NEXT: entry: // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -2124,7 +2103,7 @@ // CHECK9-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK9-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK9-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l79.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.6, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK9-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l79.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK9-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK9-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK9: omp_offload.failed: @@ -2142,7 +2121,7 @@ // CHECK9-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0 // CHECK9-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x 
i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0 // CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK9-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l84.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK9-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l84.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.11, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK9-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 // CHECK9-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]] // CHECK9: omp_offload.failed5: @@ -2160,7 +2139,7 @@ // CHECK9-NEXT: [[TMP23:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0 // CHECK9-NEXT: [[TMP24:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0 // CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK9-NEXT: [[TMP25:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l89.region_id, i32 1, i8** [[TMP23]], i8** [[TMP24]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.11, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK9-NEXT: [[TMP25:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l89.region_id, i32 1, i8** [[TMP23]], i8** [[TMP24]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.14, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.15, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK9-NEXT: [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0 // CHECK9-NEXT: br i1 [[TMP26]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]] // CHECK9: omp_offload.failed11: @@ -2176,11 +2155,11 @@ // CHECK9-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK9-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 // CHECK9-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 -// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..5 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..7 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK9-NEXT: ret void // // -// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..5 +// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..7 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK9-NEXT: entry: // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -2253,11 +2232,11 @@ // CHECK9-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK9-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 // CHECK9-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 -// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..7 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK9-NEXT: ret void // // -// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..7 +// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..10 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK9-NEXT: entry: // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -2330,11 +2309,11 @@ // CHECK9-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK9-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 // CHECK9-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 -// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..13 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK9-NEXT: ret void // // -// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..10 +// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..13 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK9-NEXT: entry: // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -2438,7 +2417,6 @@ // CHECK10-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK10-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK10-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8 -// CHECK10-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 8 // CHECK10-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK10-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK10-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -2446,18 +2424,16 @@ // CHECK10-NEXT: [[DOTOFFLOAD_BASEPTRS5:%.*]] = alloca [3 x i8*], align 8 // CHECK10-NEXT: [[DOTOFFLOAD_PTRS6:%.*]] = alloca [3 x i8*], align 8 // CHECK10-NEXT: [[DOTOFFLOAD_MAPPERS7:%.*]] = alloca [3 x i8*], align 8 -// CHECK10-NEXT: [[DOTOFFLOAD_SIZES8:%.*]] = alloca [3 x i64], align 8 -// CHECK10-NEXT: [[_TMP9:%.*]] = alloca i32, align 4 +// CHECK10-NEXT: [[_TMP8:%.*]] = alloca i32, align 4 +// CHECK10-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4 // CHECK10-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4 -// CHECK10-NEXT: [[DOTCAPTURE_EXPR_11:%.*]] = alloca i32, align 4 -// CHECK10-NEXT: [[N_CASTED18:%.*]] = alloca i64, align 8 -// CHECK10-NEXT: [[DOTOFFLOAD_BASEPTRS20:%.*]] = alloca [3 x i8*], align 8 -// CHECK10-NEXT: [[DOTOFFLOAD_PTRS21:%.*]] = alloca [3 x i8*], align 8 -// CHECK10-NEXT: [[DOTOFFLOAD_MAPPERS22:%.*]] = alloca [3 x i8*], align 8 -// CHECK10-NEXT: [[DOTOFFLOAD_SIZES23:%.*]] = alloca [3 x i64], align 8 -// CHECK10-NEXT: [[_TMP24:%.*]] = alloca i32, align 4 -// CHECK10-NEXT: [[DOTCAPTURE_EXPR_25:%.*]] = alloca i32, align 4 -// CHECK10-NEXT: [[DOTCAPTURE_EXPR_26:%.*]] = alloca i32, align 4 +// CHECK10-NEXT: [[N_CASTED17:%.*]] = alloca i64, align 8 +// CHECK10-NEXT: [[DOTOFFLOAD_BASEPTRS19:%.*]] = alloca [3 x i8*], align 8 +// CHECK10-NEXT: [[DOTOFFLOAD_PTRS20:%.*]] = alloca [3 x i8*], align 8 +// CHECK10-NEXT: [[DOTOFFLOAD_MAPPERS21:%.*]] = alloca [3 x i8*], align 8 +// CHECK10-NEXT: [[_TMP22:%.*]] = alloca i32, align 4 +// CHECK10-NEXT: [[DOTCAPTURE_EXPR_23:%.*]] = alloca i32, align 4 +// CHECK10-NEXT: [[DOTCAPTURE_EXPR_24:%.*]] = alloca i32, align 4 // CHECK10-NEXT: store i32 0, i32* [[RETVAL]], align 4 // CHECK10-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 // CHECK10-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 8 @@ -2479,170 +2455,152 @@ // CHECK10-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK10-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i64* // CHECK10-NEXT: store i64 [[TMP4]], i64* [[TMP9]], align 8 -// CHECK10-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK10-NEXT: store i64 4, i64* [[TMP10]], align 8 -// CHECK10-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK10-NEXT: store i8* null, i8** [[TMP11]], align 8 -// CHECK10-NEXT: [[TMP12:%.*]] = 
getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK10-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64* -// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP13]], align 8 -// CHECK10-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK10-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64* -// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP15]], align 8 -// CHECK10-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK10-NEXT: store i64 8, i64* [[TMP16]], align 8 -// CHECK10-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK10-NEXT: store i8* null, i8** [[TMP17]], align 8 -// CHECK10-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK10-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK10-NEXT: store i8* null, i8** [[TMP10]], align 8 +// CHECK10-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK10-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i64* +// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP12]], align 8 +// CHECK10-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK10-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i64* +// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP14]], align 8 +// CHECK10-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK10-NEXT: store i8* null, i8** [[TMP15]], align 8 +// CHECK10-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK10-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** +// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 8 +// CHECK10-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK10-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** // CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 8 -// CHECK10-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK10-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** -// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 8 -// CHECK10-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK10-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 8 -// CHECK10-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK10-NEXT: store i8* null, i8** [[TMP23]], align 8 -// CHECK10-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4 -// CHECK10-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK10-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK10-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 +// CHECK10-NEXT: store i64 [[TMP5]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 
2), align 8 +// CHECK10-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK10-NEXT: store i8* null, i8** [[TMP20]], align 8 +// CHECK10-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP23:%.*]] = load i32, i32* [[N]], align 4 +// CHECK10-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK10-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK10-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK10-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK10-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK10-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK10-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK10-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1 -// CHECK10-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 -// CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[TMP30]]) -// CHECK10-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l100.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK10-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 -// CHECK10-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK10-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK10-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], 1 +// CHECK10-NEXT: [[TMP26:%.*]] = zext i32 [[ADD]] to i64 +// CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[TMP26]]) +// CHECK10-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l100.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK10-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK10-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK10: omp_offload.failed: // CHECK10-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l100(i64 [[TMP4]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK10-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK10: omp_offload.cont: -// CHECK10-NEXT: [[TMP33:%.*]] = load i32, i32* [[N]], align 4 +// CHECK10-NEXT: [[TMP29:%.*]] = load i32, i32* [[N]], align 4 // CHECK10-NEXT: [[CONV4:%.*]] = bitcast i64* [[N_CASTED3]] to i32* -// CHECK10-NEXT: store i32 [[TMP33]], i32* [[CONV4]], align 4 -// CHECK10-NEXT: [[TMP34:%.*]] = load i64, i64* [[N_CASTED3]], align 8 -// CHECK10-NEXT: [[TMP35:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK10-NEXT: [[TMP36:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i64* -// CHECK10-NEXT: store i64 [[TMP34]], i64* [[TMP37]], align 8 -// CHECK10-NEXT: [[TMP38:%.*]] = getelementptr inbounds [3 x i8*], [3 x 
i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i64* -// CHECK10-NEXT: store i64 [[TMP34]], i64* [[TMP39]], align 8 -// CHECK10-NEXT: [[TMP40:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 0 -// CHECK10-NEXT: store i64 4, i64* [[TMP40]], align 8 -// CHECK10-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 0 +// CHECK10-NEXT: store i32 [[TMP29]], i32* [[CONV4]], align 4 +// CHECK10-NEXT: [[TMP30:%.*]] = load i64, i64* [[N_CASTED3]], align 8 +// CHECK10-NEXT: [[TMP31:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK10-NEXT: [[TMP32:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i64* +// CHECK10-NEXT: store i64 [[TMP30]], i64* [[TMP33]], align 8 +// CHECK10-NEXT: [[TMP34:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i64* +// CHECK10-NEXT: store i64 [[TMP30]], i64* [[TMP35]], align 8 +// CHECK10-NEXT: [[TMP36:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 0 +// CHECK10-NEXT: store i8* null, i8** [[TMP36]], align 8 +// CHECK10-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1 +// CHECK10-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i64* +// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP38]], align 8 +// CHECK10-NEXT: [[TMP39:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 1 +// CHECK10-NEXT: [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i64* +// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP40]], align 8 +// CHECK10-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 1 // CHECK10-NEXT: store i8* null, i8** [[TMP41]], align 8 -// CHECK10-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1 -// CHECK10-NEXT: [[TMP43:%.*]] = bitcast i8** [[TMP42]] to i64* -// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP43]], align 8 -// CHECK10-NEXT: [[TMP44:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 1 -// CHECK10-NEXT: [[TMP45:%.*]] = bitcast i8** [[TMP44]] to i64* -// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP45]], align 8 -// CHECK10-NEXT: [[TMP46:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 1 -// CHECK10-NEXT: store i64 8, i64* [[TMP46]], align 8 -// CHECK10-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 1 -// CHECK10-NEXT: store i8* null, i8** [[TMP47]], align 8 -// CHECK10-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 2 -// CHECK10-NEXT: [[TMP49:%.*]] = bitcast i8** [[TMP48]] to i32** -// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP49]], align 8 -// CHECK10-NEXT: [[TMP50:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 2 -// CHECK10-NEXT: [[TMP51:%.*]] = bitcast i8** [[TMP50]] to i32** -// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP51]], align 8 -// CHECK10-NEXT: [[TMP52:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 2 -// CHECK10-NEXT: store i64 [[TMP35]], i64* [[TMP52]], align 8 -// CHECK10-NEXT: [[TMP53:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* 
[[DOTOFFLOAD_MAPPERS7]], i64 0, i64 2 -// CHECK10-NEXT: store i8* null, i8** [[TMP53]], align 8 -// CHECK10-NEXT: [[TMP54:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP55:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP56:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP57:%.*]] = load i32, i32* [[N]], align 4 -// CHECK10-NEXT: store i32 [[TMP57]], i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK10-NEXT: [[TMP58:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK10-NEXT: [[SUB12:%.*]] = sub nsw i32 [[TMP58]], 0 -// CHECK10-NEXT: [[DIV13:%.*]] = sdiv i32 [[SUB12]], 1 -// CHECK10-NEXT: [[SUB14:%.*]] = sub nsw i32 [[DIV13]], 1 -// CHECK10-NEXT: store i32 [[SUB14]], i32* [[DOTCAPTURE_EXPR_11]], align 4 -// CHECK10-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_11]], align 4 -// CHECK10-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP59]], 1 -// CHECK10-NEXT: [[TMP60:%.*]] = zext i32 [[ADD15]] to i64 -// CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP60]]) -// CHECK10-NEXT: [[TMP61:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l105.region_id, i32 3, i8** [[TMP54]], i8** [[TMP55]], i64* [[TMP56]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.2, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK10-NEXT: [[TMP62:%.*]] = icmp ne i32 [[TMP61]], 0 -// CHECK10-NEXT: br i1 [[TMP62]], label [[OMP_OFFLOAD_FAILED16:%.*]], label [[OMP_OFFLOAD_CONT17:%.*]] -// CHECK10: omp_offload.failed16: -// CHECK10-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l105(i64 [[TMP34]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] -// CHECK10-NEXT: br label [[OMP_OFFLOAD_CONT17]] -// CHECK10: omp_offload.cont17: -// CHECK10-NEXT: [[TMP63:%.*]] = load i32, i32* [[N]], align 4 -// CHECK10-NEXT: [[CONV19:%.*]] = bitcast i64* [[N_CASTED18]] to i32* -// CHECK10-NEXT: store i32 [[TMP63]], i32* [[CONV19]], align 4 -// CHECK10-NEXT: [[TMP64:%.*]] = load i64, i64* [[N_CASTED18]], align 8 -// CHECK10-NEXT: [[TMP65:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK10-NEXT: [[TMP66:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP67:%.*]] = bitcast i8** [[TMP66]] to i64* -// CHECK10-NEXT: store i64 [[TMP64]], i64* [[TMP67]], align 8 -// CHECK10-NEXT: [[TMP68:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP69:%.*]] = bitcast i8** [[TMP68]] to i64* -// CHECK10-NEXT: store i64 [[TMP64]], i64* [[TMP69]], align 8 -// CHECK10-NEXT: [[TMP70:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES23]], i32 0, i32 0 -// CHECK10-NEXT: store i64 4, i64* [[TMP70]], align 8 -// CHECK10-NEXT: [[TMP71:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 0 -// CHECK10-NEXT: store i8* null, i8** [[TMP71]], align 8 -// CHECK10-NEXT: [[TMP72:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 1 -// CHECK10-NEXT: [[TMP73:%.*]] = bitcast i8** [[TMP72]] to i64* -// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP73]], align 8 -// CHECK10-NEXT: [[TMP74:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 1 -// CHECK10-NEXT: 
[[TMP75:%.*]] = bitcast i8** [[TMP74]] to i64* -// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP75]], align 8 -// CHECK10-NEXT: [[TMP76:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES23]], i32 0, i32 1 -// CHECK10-NEXT: store i64 8, i64* [[TMP76]], align 8 -// CHECK10-NEXT: [[TMP77:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 1 -// CHECK10-NEXT: store i8* null, i8** [[TMP77]], align 8 -// CHECK10-NEXT: [[TMP78:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 2 -// CHECK10-NEXT: [[TMP79:%.*]] = bitcast i8** [[TMP78]] to i32** -// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP79]], align 8 -// CHECK10-NEXT: [[TMP80:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 2 -// CHECK10-NEXT: [[TMP81:%.*]] = bitcast i8** [[TMP80]] to i32** -// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP81]], align 8 -// CHECK10-NEXT: [[TMP82:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES23]], i32 0, i32 2 -// CHECK10-NEXT: store i64 [[TMP65]], i64* [[TMP82]], align 8 -// CHECK10-NEXT: [[TMP83:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 2 -// CHECK10-NEXT: store i8* null, i8** [[TMP83]], align 8 -// CHECK10-NEXT: [[TMP84:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP85:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP86:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES23]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP87:%.*]] = load i32, i32* [[N]], align 4 -// CHECK10-NEXT: store i32 [[TMP87]], i32* [[DOTCAPTURE_EXPR_25]], align 4 -// CHECK10-NEXT: [[TMP88:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_25]], align 4 -// CHECK10-NEXT: [[SUB27:%.*]] = sub nsw i32 [[TMP88]], 0 -// CHECK10-NEXT: [[DIV28:%.*]] = sdiv i32 [[SUB27]], 1 -// CHECK10-NEXT: [[SUB29:%.*]] = sub nsw i32 [[DIV28]], 1 -// CHECK10-NEXT: store i32 [[SUB29]], i32* [[DOTCAPTURE_EXPR_26]], align 4 -// CHECK10-NEXT: [[TMP89:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_26]], align 4 -// CHECK10-NEXT: [[ADD30:%.*]] = add nsw i32 [[TMP89]], 1 -// CHECK10-NEXT: [[TMP90:%.*]] = zext i32 [[ADD30]] to i64 -// CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP90]]) -// CHECK10-NEXT: [[TMP91:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l110.region_id, i32 3, i8** [[TMP84]], i8** [[TMP85]], i64* [[TMP86]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK10-NEXT: [[TMP92:%.*]] = icmp ne i32 [[TMP91]], 0 -// CHECK10-NEXT: br i1 [[TMP92]], label [[OMP_OFFLOAD_FAILED31:%.*]], label [[OMP_OFFLOAD_CONT32:%.*]] -// CHECK10: omp_offload.failed31: -// CHECK10-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l110(i64 [[TMP64]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] -// CHECK10-NEXT: br label [[OMP_OFFLOAD_CONT32]] -// CHECK10: omp_offload.cont32: -// CHECK10-NEXT: [[TMP93:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 -// CHECK10-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP93]]) +// CHECK10-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 2 +// CHECK10-NEXT: [[TMP43:%.*]] = bitcast 
i8** [[TMP42]] to i32** +// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP43]], align 8 +// CHECK10-NEXT: [[TMP44:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 2 +// CHECK10-NEXT: [[TMP45:%.*]] = bitcast i8** [[TMP44]] to i32** +// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP45]], align 8 +// CHECK10-NEXT: store i64 [[TMP31]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.2, i32 0, i32 2), align 8 +// CHECK10-NEXT: [[TMP46:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 2 +// CHECK10-NEXT: store i8* null, i8** [[TMP46]], align 8 +// CHECK10-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP49:%.*]] = load i32, i32* [[N]], align 4 +// CHECK10-NEXT: store i32 [[TMP49]], i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK10-NEXT: [[TMP50:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK10-NEXT: [[SUB11:%.*]] = sub nsw i32 [[TMP50]], 0 +// CHECK10-NEXT: [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1 +// CHECK10-NEXT: [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1 +// CHECK10-NEXT: store i32 [[SUB13]], i32* [[DOTCAPTURE_EXPR_10]], align 4 +// CHECK10-NEXT: [[TMP51:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 +// CHECK10-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP51]], 1 +// CHECK10-NEXT: [[TMP52:%.*]] = zext i32 [[ADD14]] to i64 +// CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP52]]) +// CHECK10-NEXT: [[TMP53:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l105.region_id, i32 3, i8** [[TMP47]], i8** [[TMP48]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.2, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.3, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK10-NEXT: [[TMP54:%.*]] = icmp ne i32 [[TMP53]], 0 +// CHECK10-NEXT: br i1 [[TMP54]], label [[OMP_OFFLOAD_FAILED15:%.*]], label [[OMP_OFFLOAD_CONT16:%.*]] +// CHECK10: omp_offload.failed15: +// CHECK10-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l105(i64 [[TMP30]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] +// CHECK10-NEXT: br label [[OMP_OFFLOAD_CONT16]] +// CHECK10: omp_offload.cont16: +// CHECK10-NEXT: [[TMP55:%.*]] = load i32, i32* [[N]], align 4 +// CHECK10-NEXT: [[CONV18:%.*]] = bitcast i64* [[N_CASTED17]] to i32* +// CHECK10-NEXT: store i32 [[TMP55]], i32* [[CONV18]], align 4 +// CHECK10-NEXT: [[TMP56:%.*]] = load i64, i64* [[N_CASTED17]], align 8 +// CHECK10-NEXT: [[TMP57:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK10-NEXT: [[TMP58:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP59:%.*]] = bitcast i8** [[TMP58]] to i64* +// CHECK10-NEXT: store i64 [[TMP56]], i64* [[TMP59]], align 8 +// CHECK10-NEXT: [[TMP60:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP61:%.*]] = bitcast i8** [[TMP60]] to i64* +// CHECK10-NEXT: store i64 [[TMP56]], i64* [[TMP61]], align 8 +// CHECK10-NEXT: [[TMP62:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i64 0, i64 0 +// CHECK10-NEXT: store i8* null, i8** [[TMP62]], align 8 +// CHECK10-NEXT: [[TMP63:%.*]] = getelementptr inbounds [3 x 
i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 1 +// CHECK10-NEXT: [[TMP64:%.*]] = bitcast i8** [[TMP63]] to i64* +// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP64]], align 8 +// CHECK10-NEXT: [[TMP65:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 1 +// CHECK10-NEXT: [[TMP66:%.*]] = bitcast i8** [[TMP65]] to i64* +// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP66]], align 8 +// CHECK10-NEXT: [[TMP67:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i64 0, i64 1 +// CHECK10-NEXT: store i8* null, i8** [[TMP67]], align 8 +// CHECK10-NEXT: [[TMP68:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 2 +// CHECK10-NEXT: [[TMP69:%.*]] = bitcast i8** [[TMP68]] to i32** +// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP69]], align 8 +// CHECK10-NEXT: [[TMP70:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 2 +// CHECK10-NEXT: [[TMP71:%.*]] = bitcast i8** [[TMP70]] to i32** +// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP71]], align 8 +// CHECK10-NEXT: store i64 [[TMP57]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.5, i32 0, i32 2), align 8 +// CHECK10-NEXT: [[TMP72:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i64 0, i64 2 +// CHECK10-NEXT: store i8* null, i8** [[TMP72]], align 8 +// CHECK10-NEXT: [[TMP73:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP74:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP75:%.*]] = load i32, i32* [[N]], align 4 +// CHECK10-NEXT: store i32 [[TMP75]], i32* [[DOTCAPTURE_EXPR_23]], align 4 +// CHECK10-NEXT: [[TMP76:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_23]], align 4 +// CHECK10-NEXT: [[SUB25:%.*]] = sub nsw i32 [[TMP76]], 0 +// CHECK10-NEXT: [[DIV26:%.*]] = sdiv i32 [[SUB25]], 1 +// CHECK10-NEXT: [[SUB27:%.*]] = sub nsw i32 [[DIV26]], 1 +// CHECK10-NEXT: store i32 [[SUB27]], i32* [[DOTCAPTURE_EXPR_24]], align 4 +// CHECK10-NEXT: [[TMP77:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_24]], align 4 +// CHECK10-NEXT: [[ADD28:%.*]] = add nsw i32 [[TMP77]], 1 +// CHECK10-NEXT: [[TMP78:%.*]] = zext i32 [[ADD28]] to i64 +// CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP78]]) +// CHECK10-NEXT: [[TMP79:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l110.region_id, i32 3, i8** [[TMP73]], i8** [[TMP74]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.5, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.6, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK10-NEXT: [[TMP80:%.*]] = icmp ne i32 [[TMP79]], 0 +// CHECK10-NEXT: br i1 [[TMP80]], label [[OMP_OFFLOAD_FAILED29:%.*]], label [[OMP_OFFLOAD_CONT30:%.*]] +// CHECK10: omp_offload.failed29: +// CHECK10-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l110(i64 [[TMP56]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] +// CHECK10-NEXT: br label [[OMP_OFFLOAD_CONT30]] +// CHECK10: omp_offload.cont30: +// CHECK10-NEXT: [[TMP81:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 +// CHECK10-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP81]]) // CHECK10-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK10-NEXT: [[TMP94:%.*]] = load i8*, i8** 
[[SAVED_STACK]], align 8 -// CHECK10-NEXT: call void @llvm.stackrestore(i8* [[TMP94]]) -// CHECK10-NEXT: [[TMP95:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK10-NEXT: ret i32 [[TMP95]] +// CHECK10-NEXT: [[TMP82:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK10-NEXT: call void @llvm.stackrestore(i8* [[TMP82]]) +// CHECK10-NEXT: [[TMP83:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK10-NEXT: ret i32 [[TMP83]] // // // CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l100 @@ -2887,11 +2845,11 @@ // CHECK10-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK10-NEXT: store i32 [[TMP3]], i32* [[CONV1]], align 4 // CHECK10-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i64, i32*, i64)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32* [[CONV]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP4]]) +// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i64, i32*, i64)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i32* [[CONV]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP4]]) // CHECK10-NEXT: ret void // // -// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..3 +// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..4 // CHECK10-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK10-NEXT: entry: // CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -3036,7 +2994,7 @@ // CHECK10-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK10-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK10-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l79.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.6, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK10-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l79.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK10-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK10-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK10: omp_offload.failed: @@ -3054,7 +3012,7 @@ // CHECK10-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0 // CHECK10-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], 
[1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0 // CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK10-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l84.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK10-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l84.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.11, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK10-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 // CHECK10-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]] // CHECK10: omp_offload.failed5: @@ -3072,7 +3030,7 @@ // CHECK10-NEXT: [[TMP23:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0 // CHECK10-NEXT: [[TMP24:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0 // CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK10-NEXT: [[TMP25:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l89.region_id, i32 1, i8** [[TMP23]], i8** [[TMP24]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.11, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK10-NEXT: [[TMP25:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l89.region_id, i32 1, i8** [[TMP23]], i8** [[TMP24]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.14, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.15, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK10-NEXT: [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0 // CHECK10-NEXT: br i1 [[TMP26]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]] // CHECK10: omp_offload.failed11: @@ -3088,11 +3046,11 @@ // CHECK10-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK10-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 // CHECK10-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 -// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..5 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..7 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK10-NEXT: ret void // // -// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..5 +// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..7 // CHECK10-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK10-NEXT: entry: // CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -3165,11 +3123,11 @@ // CHECK10-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK10-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 // CHECK10-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 -// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..7 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK10-NEXT: ret void // // -// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..7 +// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..10 // CHECK10-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK10-NEXT: entry: // CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -3242,11 +3200,11 @@ // CHECK10-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK10-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 // CHECK10-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 -// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..13 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK10-NEXT: ret void // // -// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..10 +// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..13 // CHECK10-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK10-NEXT: entry: // CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -3350,7 +3308,6 @@ // CHECK11-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK11-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK11-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4 -// CHECK11-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 4 // CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -3358,18 +3315,16 @@ // CHECK11-NEXT: [[DOTOFFLOAD_BASEPTRS4:%.*]] = alloca [3 x i8*], align 4 // CHECK11-NEXT: [[DOTOFFLOAD_PTRS5:%.*]] = alloca [3 x i8*], align 4 // CHECK11-NEXT: [[DOTOFFLOAD_MAPPERS6:%.*]] = alloca [3 x i8*], align 4 -// CHECK11-NEXT: [[DOTOFFLOAD_SIZES7:%.*]] = alloca [3 x i64], align 4 -// CHECK11-NEXT: [[_TMP8:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[_TMP7:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTCAPTURE_EXPR_8:%.*]] = alloca i32, align 4 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[N_CASTED17:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOFFLOAD_BASEPTRS18:%.*]] = alloca [3 x i8*], align 4 -// CHECK11-NEXT: [[DOTOFFLOAD_PTRS19:%.*]] = alloca [3 x i8*], align 4 -// CHECK11-NEXT: [[DOTOFFLOAD_MAPPERS20:%.*]] = alloca [3 x i8*], align 4 -// CHECK11-NEXT: [[DOTOFFLOAD_SIZES21:%.*]] = alloca [3 x i64], align 4 -// CHECK11-NEXT: [[_TMP22:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTCAPTURE_EXPR_23:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTCAPTURE_EXPR_24:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[N_CASTED16:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOFFLOAD_BASEPTRS17:%.*]] = alloca [3 x i8*], align 4 +// CHECK11-NEXT: [[DOTOFFLOAD_PTRS18:%.*]] = alloca [3 x i8*], align 4 +// CHECK11-NEXT: [[DOTOFFLOAD_MAPPERS19:%.*]] = alloca [3 x i8*], align 4 +// CHECK11-NEXT: [[_TMP20:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTCAPTURE_EXPR_21:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTCAPTURE_EXPR_22:%.*]] = alloca i32, align 4 // CHECK11-NEXT: store i32 0, i32* [[RETVAL]], align 4 // CHECK11-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 // CHECK11-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 4 @@ -3390,170 +3345,152 @@ // CHECK11-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK11-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i32* // CHECK11-NEXT: store i32 [[TMP3]], i32* [[TMP9]], align 4 -// CHECK11-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK11-NEXT: store i64 4, i64* [[TMP10]], align 4 -// CHECK11-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK11-NEXT: store i8* null, i8** [[TMP11]], align 4 -// CHECK11-NEXT: [[TMP12:%.*]] = 
getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK11-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32* -// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP13]], align 4 -// CHECK11-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK11-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32* -// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP15]], align 4 -// CHECK11-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK11-NEXT: store i64 4, i64* [[TMP16]], align 4 -// CHECK11-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK11-NEXT: store i8* null, i8** [[TMP17]], align 4 -// CHECK11-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK11-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK11-NEXT: store i8* null, i8** [[TMP10]], align 4 +// CHECK11-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK11-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i32* +// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP12]], align 4 +// CHECK11-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK11-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i32* +// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP14]], align 4 +// CHECK11-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK11-NEXT: store i8* null, i8** [[TMP15]], align 4 +// CHECK11-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK11-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** +// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 4 +// CHECK11-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK11-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** // CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 4 -// CHECK11-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK11-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** -// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 4 -// CHECK11-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK11-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 4 -// CHECK11-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK11-NEXT: store i8* null, i8** [[TMP23]], align 4 -// CHECK11-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4 -// CHECK11-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK11-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 +// CHECK11-NEXT: store i64 [[TMP5]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 
2), align 4 +// CHECK11-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK11-NEXT: store i8* null, i8** [[TMP20]], align 4 +// CHECK11-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP23:%.*]] = load i32, i32* [[N]], align 4 +// CHECK11-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK11-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK11-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK11-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK11-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK11-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1 -// CHECK11-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 -// CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[TMP30]]) -// CHECK11-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l100.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK11-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 -// CHECK11-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK11-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], 1 +// CHECK11-NEXT: [[TMP26:%.*]] = zext i32 [[ADD]] to i64 +// CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[TMP26]]) +// CHECK11-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l100.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK11-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK11-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK11: omp_offload.failed: // CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l100(i32 [[TMP3]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK11: omp_offload.cont: -// CHECK11-NEXT: [[TMP33:%.*]] = load i32, i32* [[N]], align 4 -// CHECK11-NEXT: store i32 [[TMP33]], i32* [[N_CASTED3]], align 4 -// CHECK11-NEXT: [[TMP34:%.*]] = load i32, i32* [[N_CASTED3]], align 4 -// CHECK11-NEXT: [[TMP35:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK11-NEXT: [[TMP36:%.*]] = sext i32 [[TMP35]] to i64 -// CHECK11-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i32* -// CHECK11-NEXT: store i32 [[TMP34]], i32* [[TMP38]], align 4 -// CHECK11-NEXT: [[TMP39:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 -// CHECK11-NEXT: 
[[TMP40:%.*]] = bitcast i8** [[TMP39]] to i32* -// CHECK11-NEXT: store i32 [[TMP34]], i32* [[TMP40]], align 4 -// CHECK11-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 0 -// CHECK11-NEXT: store i64 4, i64* [[TMP41]], align 4 -// CHECK11-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP29:%.*]] = load i32, i32* [[N]], align 4 +// CHECK11-NEXT: store i32 [[TMP29]], i32* [[N_CASTED3]], align 4 +// CHECK11-NEXT: [[TMP30:%.*]] = load i32, i32* [[N_CASTED3]], align 4 +// CHECK11-NEXT: [[TMP31:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK11-NEXT: [[TMP32:%.*]] = sext i32 [[TMP31]] to i64 +// CHECK11-NEXT: [[TMP33:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i32* +// CHECK11-NEXT: store i32 [[TMP30]], i32* [[TMP34]], align 4 +// CHECK11-NEXT: [[TMP35:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP36:%.*]] = bitcast i8** [[TMP35]] to i32* +// CHECK11-NEXT: store i32 [[TMP30]], i32* [[TMP36]], align 4 +// CHECK11-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0 +// CHECK11-NEXT: store i8* null, i8** [[TMP37]], align 4 +// CHECK11-NEXT: [[TMP38:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1 +// CHECK11-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i32* +// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP39]], align 4 +// CHECK11-NEXT: [[TMP40:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 1 +// CHECK11-NEXT: [[TMP41:%.*]] = bitcast i8** [[TMP40]] to i32* +// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP41]], align 4 +// CHECK11-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1 // CHECK11-NEXT: store i8* null, i8** [[TMP42]], align 4 -// CHECK11-NEXT: [[TMP43:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1 -// CHECK11-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32* -// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP44]], align 4 -// CHECK11-NEXT: [[TMP45:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 1 -// CHECK11-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32* -// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP46]], align 4 -// CHECK11-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 1 -// CHECK11-NEXT: store i64 4, i64* [[TMP47]], align 4 -// CHECK11-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1 -// CHECK11-NEXT: store i8* null, i8** [[TMP48]], align 4 -// CHECK11-NEXT: [[TMP49:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2 -// CHECK11-NEXT: [[TMP50:%.*]] = bitcast i8** [[TMP49]] to i32** -// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP50]], align 4 -// CHECK11-NEXT: [[TMP51:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 2 -// CHECK11-NEXT: [[TMP52:%.*]] = bitcast i8** [[TMP51]] to i32** -// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP52]], align 4 -// CHECK11-NEXT: [[TMP53:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 2 -// CHECK11-NEXT: store i64 [[TMP36]], i64* [[TMP53]], align 4 -// 
CHECK11-NEXT: [[TMP54:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 2 -// CHECK11-NEXT: store i8* null, i8** [[TMP54]], align 4 -// CHECK11-NEXT: [[TMP55:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP56:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP57:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP58:%.*]] = load i32, i32* [[N]], align 4 -// CHECK11-NEXT: store i32 [[TMP58]], i32* [[DOTCAPTURE_EXPR_9]], align 4 -// CHECK11-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 -// CHECK11-NEXT: [[SUB11:%.*]] = sub nsw i32 [[TMP59]], 0 -// CHECK11-NEXT: [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1 -// CHECK11-NEXT: [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1 -// CHECK11-NEXT: store i32 [[SUB13]], i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK11-NEXT: [[TMP60:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK11-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP60]], 1 -// CHECK11-NEXT: [[TMP61:%.*]] = zext i32 [[ADD14]] to i64 -// CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP61]]) -// CHECK11-NEXT: [[TMP62:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l105.region_id, i32 3, i8** [[TMP55]], i8** [[TMP56]], i64* [[TMP57]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.2, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK11-NEXT: [[TMP63:%.*]] = icmp ne i32 [[TMP62]], 0 -// CHECK11-NEXT: br i1 [[TMP63]], label [[OMP_OFFLOAD_FAILED15:%.*]], label [[OMP_OFFLOAD_CONT16:%.*]] -// CHECK11: omp_offload.failed15: -// CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l105(i32 [[TMP34]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] -// CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT16]] -// CHECK11: omp_offload.cont16: -// CHECK11-NEXT: [[TMP64:%.*]] = load i32, i32* [[N]], align 4 -// CHECK11-NEXT: store i32 [[TMP64]], i32* [[N_CASTED17]], align 4 -// CHECK11-NEXT: [[TMP65:%.*]] = load i32, i32* [[N_CASTED17]], align 4 -// CHECK11-NEXT: [[TMP66:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK11-NEXT: [[TMP67:%.*]] = sext i32 [[TMP66]] to i64 -// CHECK11-NEXT: [[TMP68:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP69:%.*]] = bitcast i8** [[TMP68]] to i32* -// CHECK11-NEXT: store i32 [[TMP65]], i32* [[TMP69]], align 4 -// CHECK11-NEXT: [[TMP70:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP71:%.*]] = bitcast i8** [[TMP70]] to i32* -// CHECK11-NEXT: store i32 [[TMP65]], i32* [[TMP71]], align 4 -// CHECK11-NEXT: [[TMP72:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES21]], i32 0, i32 0 -// CHECK11-NEXT: store i64 4, i64* [[TMP72]], align 4 -// CHECK11-NEXT: [[TMP73:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 0 -// CHECK11-NEXT: store i8* null, i8** [[TMP73]], align 4 -// CHECK11-NEXT: [[TMP74:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 1 -// CHECK11-NEXT: [[TMP75:%.*]] = bitcast i8** [[TMP74]] to i32* -// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP75]], align 4 -// CHECK11-NEXT: [[TMP76:%.*]] = getelementptr inbounds [3 x i8*], [3 x 
i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 1 -// CHECK11-NEXT: [[TMP77:%.*]] = bitcast i8** [[TMP76]] to i32* -// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP77]], align 4 -// CHECK11-NEXT: [[TMP78:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES21]], i32 0, i32 1 -// CHECK11-NEXT: store i64 4, i64* [[TMP78]], align 4 -// CHECK11-NEXT: [[TMP79:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 1 -// CHECK11-NEXT: store i8* null, i8** [[TMP79]], align 4 -// CHECK11-NEXT: [[TMP80:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 2 -// CHECK11-NEXT: [[TMP81:%.*]] = bitcast i8** [[TMP80]] to i32** -// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP81]], align 4 -// CHECK11-NEXT: [[TMP82:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 2 -// CHECK11-NEXT: [[TMP83:%.*]] = bitcast i8** [[TMP82]] to i32** -// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP83]], align 4 -// CHECK11-NEXT: [[TMP84:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES21]], i32 0, i32 2 -// CHECK11-NEXT: store i64 [[TMP67]], i64* [[TMP84]], align 4 -// CHECK11-NEXT: [[TMP85:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 2 -// CHECK11-NEXT: store i8* null, i8** [[TMP85]], align 4 -// CHECK11-NEXT: [[TMP86:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP87:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP88:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES21]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP89:%.*]] = load i32, i32* [[N]], align 4 -// CHECK11-NEXT: store i32 [[TMP89]], i32* [[DOTCAPTURE_EXPR_23]], align 4 -// CHECK11-NEXT: [[TMP90:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_23]], align 4 -// CHECK11-NEXT: [[SUB25:%.*]] = sub nsw i32 [[TMP90]], 0 -// CHECK11-NEXT: [[DIV26:%.*]] = sdiv i32 [[SUB25]], 1 -// CHECK11-NEXT: [[SUB27:%.*]] = sub nsw i32 [[DIV26]], 1 -// CHECK11-NEXT: store i32 [[SUB27]], i32* [[DOTCAPTURE_EXPR_24]], align 4 -// CHECK11-NEXT: [[TMP91:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_24]], align 4 -// CHECK11-NEXT: [[ADD28:%.*]] = add nsw i32 [[TMP91]], 1 -// CHECK11-NEXT: [[TMP92:%.*]] = zext i32 [[ADD28]] to i64 -// CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP92]]) -// CHECK11-NEXT: [[TMP93:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l110.region_id, i32 3, i8** [[TMP86]], i8** [[TMP87]], i64* [[TMP88]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK11-NEXT: [[TMP94:%.*]] = icmp ne i32 [[TMP93]], 0 -// CHECK11-NEXT: br i1 [[TMP94]], label [[OMP_OFFLOAD_FAILED29:%.*]], label [[OMP_OFFLOAD_CONT30:%.*]] -// CHECK11: omp_offload.failed29: -// CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l110(i32 [[TMP65]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] -// CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT30]] -// CHECK11: omp_offload.cont30: -// CHECK11-NEXT: [[TMP95:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 -// CHECK11-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP95]]) +// CHECK11-NEXT: [[TMP43:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2 
+// CHECK11-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32** +// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP44]], align 4 +// CHECK11-NEXT: [[TMP45:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 2 +// CHECK11-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32** +// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP46]], align 4 +// CHECK11-NEXT: store i64 [[TMP32]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.2, i32 0, i32 2), align 4 +// CHECK11-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 2 +// CHECK11-NEXT: store i8* null, i8** [[TMP47]], align 4 +// CHECK11-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP49:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP50:%.*]] = load i32, i32* [[N]], align 4 +// CHECK11-NEXT: store i32 [[TMP50]], i32* [[DOTCAPTURE_EXPR_8]], align 4 +// CHECK11-NEXT: [[TMP51:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_8]], align 4 +// CHECK11-NEXT: [[SUB10:%.*]] = sub nsw i32 [[TMP51]], 0 +// CHECK11-NEXT: [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1 +// CHECK11-NEXT: [[SUB12:%.*]] = sub nsw i32 [[DIV11]], 1 +// CHECK11-NEXT: store i32 [[SUB12]], i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK11-NEXT: [[TMP52:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK11-NEXT: [[ADD13:%.*]] = add nsw i32 [[TMP52]], 1 +// CHECK11-NEXT: [[TMP53:%.*]] = zext i32 [[ADD13]] to i64 +// CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP53]]) +// CHECK11-NEXT: [[TMP54:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l105.region_id, i32 3, i8** [[TMP48]], i8** [[TMP49]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.2, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.3, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK11-NEXT: [[TMP55:%.*]] = icmp ne i32 [[TMP54]], 0 +// CHECK11-NEXT: br i1 [[TMP55]], label [[OMP_OFFLOAD_FAILED14:%.*]], label [[OMP_OFFLOAD_CONT15:%.*]] +// CHECK11: omp_offload.failed14: +// CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l105(i32 [[TMP30]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] +// CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT15]] +// CHECK11: omp_offload.cont15: +// CHECK11-NEXT: [[TMP56:%.*]] = load i32, i32* [[N]], align 4 +// CHECK11-NEXT: store i32 [[TMP56]], i32* [[N_CASTED16]], align 4 +// CHECK11-NEXT: [[TMP57:%.*]] = load i32, i32* [[N_CASTED16]], align 4 +// CHECK11-NEXT: [[TMP58:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK11-NEXT: [[TMP59:%.*]] = sext i32 [[TMP58]] to i64 +// CHECK11-NEXT: [[TMP60:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP61:%.*]] = bitcast i8** [[TMP60]] to i32* +// CHECK11-NEXT: store i32 [[TMP57]], i32* [[TMP61]], align 4 +// CHECK11-NEXT: [[TMP62:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP63:%.*]] = bitcast i8** [[TMP62]] to i32* +// CHECK11-NEXT: store i32 [[TMP57]], i32* [[TMP63]], align 4 +// CHECK11-NEXT: [[TMP64:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 0 +// CHECK11-NEXT: store i8* null, i8** [[TMP64]], align 4 +// CHECK11-NEXT: [[TMP65:%.*]] 
= getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 1 +// CHECK11-NEXT: [[TMP66:%.*]] = bitcast i8** [[TMP65]] to i32* +// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP66]], align 4 +// CHECK11-NEXT: [[TMP67:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 1 +// CHECK11-NEXT: [[TMP68:%.*]] = bitcast i8** [[TMP67]] to i32* +// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP68]], align 4 +// CHECK11-NEXT: [[TMP69:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 1 +// CHECK11-NEXT: store i8* null, i8** [[TMP69]], align 4 +// CHECK11-NEXT: [[TMP70:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 2 +// CHECK11-NEXT: [[TMP71:%.*]] = bitcast i8** [[TMP70]] to i32** +// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP71]], align 4 +// CHECK11-NEXT: [[TMP72:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 2 +// CHECK11-NEXT: [[TMP73:%.*]] = bitcast i8** [[TMP72]] to i32** +// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP73]], align 4 +// CHECK11-NEXT: store i64 [[TMP59]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.5, i32 0, i32 2), align 4 +// CHECK11-NEXT: [[TMP74:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 2 +// CHECK11-NEXT: store i8* null, i8** [[TMP74]], align 4 +// CHECK11-NEXT: [[TMP75:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP76:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP77:%.*]] = load i32, i32* [[N]], align 4 +// CHECK11-NEXT: store i32 [[TMP77]], i32* [[DOTCAPTURE_EXPR_21]], align 4 +// CHECK11-NEXT: [[TMP78:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_21]], align 4 +// CHECK11-NEXT: [[SUB23:%.*]] = sub nsw i32 [[TMP78]], 0 +// CHECK11-NEXT: [[DIV24:%.*]] = sdiv i32 [[SUB23]], 1 +// CHECK11-NEXT: [[SUB25:%.*]] = sub nsw i32 [[DIV24]], 1 +// CHECK11-NEXT: store i32 [[SUB25]], i32* [[DOTCAPTURE_EXPR_22]], align 4 +// CHECK11-NEXT: [[TMP79:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_22]], align 4 +// CHECK11-NEXT: [[ADD26:%.*]] = add nsw i32 [[TMP79]], 1 +// CHECK11-NEXT: [[TMP80:%.*]] = zext i32 [[ADD26]] to i64 +// CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP80]]) +// CHECK11-NEXT: [[TMP81:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l110.region_id, i32 3, i8** [[TMP75]], i8** [[TMP76]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.5, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.6, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK11-NEXT: [[TMP82:%.*]] = icmp ne i32 [[TMP81]], 0 +// CHECK11-NEXT: br i1 [[TMP82]], label [[OMP_OFFLOAD_FAILED27:%.*]], label [[OMP_OFFLOAD_CONT28:%.*]] +// CHECK11: omp_offload.failed27: +// CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l110(i32 [[TMP57]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] +// CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT28]] +// CHECK11: omp_offload.cont28: +// CHECK11-NEXT: [[TMP83:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 +// CHECK11-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP83]]) // CHECK11-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK11-NEXT: [[TMP96:%.*]] = 
load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK11-NEXT: call void @llvm.stackrestore(i8* [[TMP96]]) -// CHECK11-NEXT: [[TMP97:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK11-NEXT: ret i32 [[TMP97]] +// CHECK11-NEXT: [[TMP84:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK11-NEXT: call void @llvm.stackrestore(i8* [[TMP84]]) +// CHECK11-NEXT: [[TMP85:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK11-NEXT: ret i32 [[TMP85]] // // // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l100 @@ -3792,11 +3729,11 @@ // CHECK11-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 // CHECK11-NEXT: store i32 [[TMP3]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK11-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32, i32*, i32)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP4]]) +// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32, i32*, i32)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP4]]) // CHECK11-NEXT: ret void // // -// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..3 +// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..4 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK11-NEXT: entry: // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -3939,7 +3876,7 @@ // CHECK11-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK11-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK11-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l79.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.6, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK11-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l79.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK11-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK11-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK11: omp_offload.failed: @@ -3957,7 +3894,7 @@ // CHECK11-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0 // CHECK11-NEXT: [[TMP15:%.*]] = 
getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0 // CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK11-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l84.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK11-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l84.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.11, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK11-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 // CHECK11-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]] // CHECK11: omp_offload.failed5: @@ -3975,7 +3912,7 @@ // CHECK11-NEXT: [[TMP23:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0 // CHECK11-NEXT: [[TMP24:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0 // CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK11-NEXT: [[TMP25:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l89.region_id, i32 1, i8** [[TMP23]], i8** [[TMP24]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.11, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK11-NEXT: [[TMP25:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l89.region_id, i32 1, i8** [[TMP23]], i8** [[TMP24]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.14, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.15, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK11-NEXT: [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0 // CHECK11-NEXT: br i1 [[TMP26]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]] // CHECK11: omp_offload.failed11: @@ -3991,11 +3928,11 @@ // CHECK11-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK11-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 // CHECK11-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 -// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..5 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..7 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK11-NEXT: ret void // // -// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..5 +// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..7 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK11-NEXT: entry: // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -4067,11 +4004,11 @@ // CHECK11-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK11-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 // CHECK11-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 -// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..7 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK11-NEXT: ret void // // -// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..7 +// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..10 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK11-NEXT: entry: // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -4143,11 +4080,11 @@ // CHECK11-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK11-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 // CHECK11-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 -// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..13 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK11-NEXT: ret void // // -// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..10 +// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..13 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK11-NEXT: entry: // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -4250,7 +4187,6 @@ // CHECK12-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK12-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK12-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4 -// CHECK12-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 4 // CHECK12-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK12-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK12-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -4258,18 +4194,16 @@ // CHECK12-NEXT: [[DOTOFFLOAD_BASEPTRS4:%.*]] = alloca [3 x i8*], align 4 // CHECK12-NEXT: [[DOTOFFLOAD_PTRS5:%.*]] = alloca [3 x i8*], align 4 // CHECK12-NEXT: [[DOTOFFLOAD_MAPPERS6:%.*]] = alloca [3 x i8*], align 4 -// CHECK12-NEXT: [[DOTOFFLOAD_SIZES7:%.*]] = alloca [3 x i64], align 4 -// CHECK12-NEXT: [[_TMP8:%.*]] = alloca i32, align 4 +// CHECK12-NEXT: [[_TMP7:%.*]] = alloca i32, align 4 +// CHECK12-NEXT: [[DOTCAPTURE_EXPR_8:%.*]] = alloca i32, align 4 // CHECK12-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4 -// CHECK12-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4 -// CHECK12-NEXT: [[N_CASTED17:%.*]] = alloca i32, align 4 -// CHECK12-NEXT: [[DOTOFFLOAD_BASEPTRS18:%.*]] = alloca [3 x i8*], align 4 -// CHECK12-NEXT: [[DOTOFFLOAD_PTRS19:%.*]] = alloca [3 x i8*], align 4 -// CHECK12-NEXT: [[DOTOFFLOAD_MAPPERS20:%.*]] = alloca [3 x i8*], align 4 -// CHECK12-NEXT: [[DOTOFFLOAD_SIZES21:%.*]] = alloca [3 x i64], align 4 -// CHECK12-NEXT: [[_TMP22:%.*]] = alloca i32, align 4 -// CHECK12-NEXT: [[DOTCAPTURE_EXPR_23:%.*]] = alloca i32, align 4 -// CHECK12-NEXT: [[DOTCAPTURE_EXPR_24:%.*]] = alloca i32, align 4 +// CHECK12-NEXT: [[N_CASTED16:%.*]] = alloca i32, align 4 +// CHECK12-NEXT: [[DOTOFFLOAD_BASEPTRS17:%.*]] = alloca [3 x i8*], align 4 +// CHECK12-NEXT: [[DOTOFFLOAD_PTRS18:%.*]] = alloca [3 x i8*], align 4 +// CHECK12-NEXT: [[DOTOFFLOAD_MAPPERS19:%.*]] = alloca [3 x i8*], align 4 +// CHECK12-NEXT: [[_TMP20:%.*]] = alloca i32, align 4 +// CHECK12-NEXT: [[DOTCAPTURE_EXPR_21:%.*]] = alloca i32, align 4 +// CHECK12-NEXT: [[DOTCAPTURE_EXPR_22:%.*]] = alloca i32, align 4 // CHECK12-NEXT: store i32 0, i32* [[RETVAL]], align 4 // CHECK12-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 // CHECK12-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 4 @@ -4290,170 +4224,152 @@ // CHECK12-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK12-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i32* // CHECK12-NEXT: store i32 [[TMP3]], i32* [[TMP9]], align 4 -// CHECK12-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK12-NEXT: store i64 4, i64* [[TMP10]], align 4 -// CHECK12-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK12-NEXT: store i8* null, i8** [[TMP11]], align 4 -// CHECK12-NEXT: [[TMP12:%.*]] = 
getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK12-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32* -// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP13]], align 4 -// CHECK12-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK12-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32* -// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP15]], align 4 -// CHECK12-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK12-NEXT: store i64 4, i64* [[TMP16]], align 4 -// CHECK12-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK12-NEXT: store i8* null, i8** [[TMP17]], align 4 -// CHECK12-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK12-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK12-NEXT: store i8* null, i8** [[TMP10]], align 4 +// CHECK12-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK12-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i32* +// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP12]], align 4 +// CHECK12-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK12-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i32* +// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP14]], align 4 +// CHECK12-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK12-NEXT: store i8* null, i8** [[TMP15]], align 4 +// CHECK12-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK12-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** +// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 4 +// CHECK12-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK12-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** // CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 4 -// CHECK12-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK12-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** -// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 4 -// CHECK12-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK12-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 4 -// CHECK12-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK12-NEXT: store i8* null, i8** [[TMP23]], align 4 -// CHECK12-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4 -// CHECK12-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK12-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK12-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 +// CHECK12-NEXT: store i64 [[TMP5]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 
2), align 4 +// CHECK12-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK12-NEXT: store i8* null, i8** [[TMP20]], align 4 +// CHECK12-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP23:%.*]] = load i32, i32* [[N]], align 4 +// CHECK12-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK12-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK12-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK12-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK12-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK12-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK12-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK12-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1 -// CHECK12-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 -// CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[TMP30]]) -// CHECK12-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l100.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK12-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 -// CHECK12-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK12-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK12-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], 1 +// CHECK12-NEXT: [[TMP26:%.*]] = zext i32 [[ADD]] to i64 +// CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[TMP26]]) +// CHECK12-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l100.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK12-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK12-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK12: omp_offload.failed: // CHECK12-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l100(i32 [[TMP3]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK12-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK12: omp_offload.cont: -// CHECK12-NEXT: [[TMP33:%.*]] = load i32, i32* [[N]], align 4 -// CHECK12-NEXT: store i32 [[TMP33]], i32* [[N_CASTED3]], align 4 -// CHECK12-NEXT: [[TMP34:%.*]] = load i32, i32* [[N_CASTED3]], align 4 -// CHECK12-NEXT: [[TMP35:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK12-NEXT: [[TMP36:%.*]] = sext i32 [[TMP35]] to i64 -// CHECK12-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i32* -// CHECK12-NEXT: store i32 [[TMP34]], i32* [[TMP38]], align 4 -// CHECK12-NEXT: [[TMP39:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 -// CHECK12-NEXT: 
[[TMP40:%.*]] = bitcast i8** [[TMP39]] to i32* -// CHECK12-NEXT: store i32 [[TMP34]], i32* [[TMP40]], align 4 -// CHECK12-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 0 -// CHECK12-NEXT: store i64 4, i64* [[TMP41]], align 4 -// CHECK12-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP29:%.*]] = load i32, i32* [[N]], align 4 +// CHECK12-NEXT: store i32 [[TMP29]], i32* [[N_CASTED3]], align 4 +// CHECK12-NEXT: [[TMP30:%.*]] = load i32, i32* [[N_CASTED3]], align 4 +// CHECK12-NEXT: [[TMP31:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK12-NEXT: [[TMP32:%.*]] = sext i32 [[TMP31]] to i64 +// CHECK12-NEXT: [[TMP33:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i32* +// CHECK12-NEXT: store i32 [[TMP30]], i32* [[TMP34]], align 4 +// CHECK12-NEXT: [[TMP35:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP36:%.*]] = bitcast i8** [[TMP35]] to i32* +// CHECK12-NEXT: store i32 [[TMP30]], i32* [[TMP36]], align 4 +// CHECK12-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0 +// CHECK12-NEXT: store i8* null, i8** [[TMP37]], align 4 +// CHECK12-NEXT: [[TMP38:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1 +// CHECK12-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i32* +// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP39]], align 4 +// CHECK12-NEXT: [[TMP40:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 1 +// CHECK12-NEXT: [[TMP41:%.*]] = bitcast i8** [[TMP40]] to i32* +// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP41]], align 4 +// CHECK12-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1 // CHECK12-NEXT: store i8* null, i8** [[TMP42]], align 4 -// CHECK12-NEXT: [[TMP43:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1 -// CHECK12-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32* -// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP44]], align 4 -// CHECK12-NEXT: [[TMP45:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 1 -// CHECK12-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32* -// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP46]], align 4 -// CHECK12-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 1 -// CHECK12-NEXT: store i64 4, i64* [[TMP47]], align 4 -// CHECK12-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1 -// CHECK12-NEXT: store i8* null, i8** [[TMP48]], align 4 -// CHECK12-NEXT: [[TMP49:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2 -// CHECK12-NEXT: [[TMP50:%.*]] = bitcast i8** [[TMP49]] to i32** -// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP50]], align 4 -// CHECK12-NEXT: [[TMP51:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 2 -// CHECK12-NEXT: [[TMP52:%.*]] = bitcast i8** [[TMP51]] to i32** -// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP52]], align 4 -// CHECK12-NEXT: [[TMP53:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 2 -// CHECK12-NEXT: store i64 [[TMP36]], i64* [[TMP53]], align 4 -// 
CHECK12-NEXT: [[TMP54:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 2 -// CHECK12-NEXT: store i8* null, i8** [[TMP54]], align 4 -// CHECK12-NEXT: [[TMP55:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP56:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP57:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP58:%.*]] = load i32, i32* [[N]], align 4 -// CHECK12-NEXT: store i32 [[TMP58]], i32* [[DOTCAPTURE_EXPR_9]], align 4 -// CHECK12-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 -// CHECK12-NEXT: [[SUB11:%.*]] = sub nsw i32 [[TMP59]], 0 -// CHECK12-NEXT: [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1 -// CHECK12-NEXT: [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1 -// CHECK12-NEXT: store i32 [[SUB13]], i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK12-NEXT: [[TMP60:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK12-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP60]], 1 -// CHECK12-NEXT: [[TMP61:%.*]] = zext i32 [[ADD14]] to i64 -// CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP61]]) -// CHECK12-NEXT: [[TMP62:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l105.region_id, i32 3, i8** [[TMP55]], i8** [[TMP56]], i64* [[TMP57]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.2, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK12-NEXT: [[TMP63:%.*]] = icmp ne i32 [[TMP62]], 0 -// CHECK12-NEXT: br i1 [[TMP63]], label [[OMP_OFFLOAD_FAILED15:%.*]], label [[OMP_OFFLOAD_CONT16:%.*]] -// CHECK12: omp_offload.failed15: -// CHECK12-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l105(i32 [[TMP34]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] -// CHECK12-NEXT: br label [[OMP_OFFLOAD_CONT16]] -// CHECK12: omp_offload.cont16: -// CHECK12-NEXT: [[TMP64:%.*]] = load i32, i32* [[N]], align 4 -// CHECK12-NEXT: store i32 [[TMP64]], i32* [[N_CASTED17]], align 4 -// CHECK12-NEXT: [[TMP65:%.*]] = load i32, i32* [[N_CASTED17]], align 4 -// CHECK12-NEXT: [[TMP66:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK12-NEXT: [[TMP67:%.*]] = sext i32 [[TMP66]] to i64 -// CHECK12-NEXT: [[TMP68:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP69:%.*]] = bitcast i8** [[TMP68]] to i32* -// CHECK12-NEXT: store i32 [[TMP65]], i32* [[TMP69]], align 4 -// CHECK12-NEXT: [[TMP70:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP71:%.*]] = bitcast i8** [[TMP70]] to i32* -// CHECK12-NEXT: store i32 [[TMP65]], i32* [[TMP71]], align 4 -// CHECK12-NEXT: [[TMP72:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES21]], i32 0, i32 0 -// CHECK12-NEXT: store i64 4, i64* [[TMP72]], align 4 -// CHECK12-NEXT: [[TMP73:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 0 -// CHECK12-NEXT: store i8* null, i8** [[TMP73]], align 4 -// CHECK12-NEXT: [[TMP74:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 1 -// CHECK12-NEXT: [[TMP75:%.*]] = bitcast i8** [[TMP74]] to i32* -// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP75]], align 4 -// CHECK12-NEXT: [[TMP76:%.*]] = getelementptr inbounds [3 x i8*], [3 x 
i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 1 -// CHECK12-NEXT: [[TMP77:%.*]] = bitcast i8** [[TMP76]] to i32* -// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP77]], align 4 -// CHECK12-NEXT: [[TMP78:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES21]], i32 0, i32 1 -// CHECK12-NEXT: store i64 4, i64* [[TMP78]], align 4 -// CHECK12-NEXT: [[TMP79:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 1 -// CHECK12-NEXT: store i8* null, i8** [[TMP79]], align 4 -// CHECK12-NEXT: [[TMP80:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 2 -// CHECK12-NEXT: [[TMP81:%.*]] = bitcast i8** [[TMP80]] to i32** -// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP81]], align 4 -// CHECK12-NEXT: [[TMP82:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 2 -// CHECK12-NEXT: [[TMP83:%.*]] = bitcast i8** [[TMP82]] to i32** -// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP83]], align 4 -// CHECK12-NEXT: [[TMP84:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES21]], i32 0, i32 2 -// CHECK12-NEXT: store i64 [[TMP67]], i64* [[TMP84]], align 4 -// CHECK12-NEXT: [[TMP85:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 2 -// CHECK12-NEXT: store i8* null, i8** [[TMP85]], align 4 -// CHECK12-NEXT: [[TMP86:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP87:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP88:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES21]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP89:%.*]] = load i32, i32* [[N]], align 4 -// CHECK12-NEXT: store i32 [[TMP89]], i32* [[DOTCAPTURE_EXPR_23]], align 4 -// CHECK12-NEXT: [[TMP90:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_23]], align 4 -// CHECK12-NEXT: [[SUB25:%.*]] = sub nsw i32 [[TMP90]], 0 -// CHECK12-NEXT: [[DIV26:%.*]] = sdiv i32 [[SUB25]], 1 -// CHECK12-NEXT: [[SUB27:%.*]] = sub nsw i32 [[DIV26]], 1 -// CHECK12-NEXT: store i32 [[SUB27]], i32* [[DOTCAPTURE_EXPR_24]], align 4 -// CHECK12-NEXT: [[TMP91:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_24]], align 4 -// CHECK12-NEXT: [[ADD28:%.*]] = add nsw i32 [[TMP91]], 1 -// CHECK12-NEXT: [[TMP92:%.*]] = zext i32 [[ADD28]] to i64 -// CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP92]]) -// CHECK12-NEXT: [[TMP93:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l110.region_id, i32 3, i8** [[TMP86]], i8** [[TMP87]], i64* [[TMP88]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK12-NEXT: [[TMP94:%.*]] = icmp ne i32 [[TMP93]], 0 -// CHECK12-NEXT: br i1 [[TMP94]], label [[OMP_OFFLOAD_FAILED29:%.*]], label [[OMP_OFFLOAD_CONT30:%.*]] -// CHECK12: omp_offload.failed29: -// CHECK12-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l110(i32 [[TMP65]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] -// CHECK12-NEXT: br label [[OMP_OFFLOAD_CONT30]] -// CHECK12: omp_offload.cont30: -// CHECK12-NEXT: [[TMP95:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 -// CHECK12-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP95]]) +// CHECK12-NEXT: [[TMP43:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2 
+// CHECK12-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32** +// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP44]], align 4 +// CHECK12-NEXT: [[TMP45:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 2 +// CHECK12-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32** +// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP46]], align 4 +// CHECK12-NEXT: store i64 [[TMP32]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.2, i32 0, i32 2), align 4 +// CHECK12-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 2 +// CHECK12-NEXT: store i8* null, i8** [[TMP47]], align 4 +// CHECK12-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP49:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP50:%.*]] = load i32, i32* [[N]], align 4 +// CHECK12-NEXT: store i32 [[TMP50]], i32* [[DOTCAPTURE_EXPR_8]], align 4 +// CHECK12-NEXT: [[TMP51:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_8]], align 4 +// CHECK12-NEXT: [[SUB10:%.*]] = sub nsw i32 [[TMP51]], 0 +// CHECK12-NEXT: [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1 +// CHECK12-NEXT: [[SUB12:%.*]] = sub nsw i32 [[DIV11]], 1 +// CHECK12-NEXT: store i32 [[SUB12]], i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK12-NEXT: [[TMP52:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK12-NEXT: [[ADD13:%.*]] = add nsw i32 [[TMP52]], 1 +// CHECK12-NEXT: [[TMP53:%.*]] = zext i32 [[ADD13]] to i64 +// CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP53]]) +// CHECK12-NEXT: [[TMP54:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l105.region_id, i32 3, i8** [[TMP48]], i8** [[TMP49]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.2, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.3, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK12-NEXT: [[TMP55:%.*]] = icmp ne i32 [[TMP54]], 0 +// CHECK12-NEXT: br i1 [[TMP55]], label [[OMP_OFFLOAD_FAILED14:%.*]], label [[OMP_OFFLOAD_CONT15:%.*]] +// CHECK12: omp_offload.failed14: +// CHECK12-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l105(i32 [[TMP30]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] +// CHECK12-NEXT: br label [[OMP_OFFLOAD_CONT15]] +// CHECK12: omp_offload.cont15: +// CHECK12-NEXT: [[TMP56:%.*]] = load i32, i32* [[N]], align 4 +// CHECK12-NEXT: store i32 [[TMP56]], i32* [[N_CASTED16]], align 4 +// CHECK12-NEXT: [[TMP57:%.*]] = load i32, i32* [[N_CASTED16]], align 4 +// CHECK12-NEXT: [[TMP58:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK12-NEXT: [[TMP59:%.*]] = sext i32 [[TMP58]] to i64 +// CHECK12-NEXT: [[TMP60:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP61:%.*]] = bitcast i8** [[TMP60]] to i32* +// CHECK12-NEXT: store i32 [[TMP57]], i32* [[TMP61]], align 4 +// CHECK12-NEXT: [[TMP62:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP63:%.*]] = bitcast i8** [[TMP62]] to i32* +// CHECK12-NEXT: store i32 [[TMP57]], i32* [[TMP63]], align 4 +// CHECK12-NEXT: [[TMP64:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 0 +// CHECK12-NEXT: store i8* null, i8** [[TMP64]], align 4 +// CHECK12-NEXT: [[TMP65:%.*]] 
= getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 1 +// CHECK12-NEXT: [[TMP66:%.*]] = bitcast i8** [[TMP65]] to i32* +// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP66]], align 4 +// CHECK12-NEXT: [[TMP67:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 1 +// CHECK12-NEXT: [[TMP68:%.*]] = bitcast i8** [[TMP67]] to i32* +// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP68]], align 4 +// CHECK12-NEXT: [[TMP69:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 1 +// CHECK12-NEXT: store i8* null, i8** [[TMP69]], align 4 +// CHECK12-NEXT: [[TMP70:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 2 +// CHECK12-NEXT: [[TMP71:%.*]] = bitcast i8** [[TMP70]] to i32** +// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP71]], align 4 +// CHECK12-NEXT: [[TMP72:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 2 +// CHECK12-NEXT: [[TMP73:%.*]] = bitcast i8** [[TMP72]] to i32** +// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP73]], align 4 +// CHECK12-NEXT: store i64 [[TMP59]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.5, i32 0, i32 2), align 4 +// CHECK12-NEXT: [[TMP74:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 2 +// CHECK12-NEXT: store i8* null, i8** [[TMP74]], align 4 +// CHECK12-NEXT: [[TMP75:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP76:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP77:%.*]] = load i32, i32* [[N]], align 4 +// CHECK12-NEXT: store i32 [[TMP77]], i32* [[DOTCAPTURE_EXPR_21]], align 4 +// CHECK12-NEXT: [[TMP78:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_21]], align 4 +// CHECK12-NEXT: [[SUB23:%.*]] = sub nsw i32 [[TMP78]], 0 +// CHECK12-NEXT: [[DIV24:%.*]] = sdiv i32 [[SUB23]], 1 +// CHECK12-NEXT: [[SUB25:%.*]] = sub nsw i32 [[DIV24]], 1 +// CHECK12-NEXT: store i32 [[SUB25]], i32* [[DOTCAPTURE_EXPR_22]], align 4 +// CHECK12-NEXT: [[TMP79:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_22]], align 4 +// CHECK12-NEXT: [[ADD26:%.*]] = add nsw i32 [[TMP79]], 1 +// CHECK12-NEXT: [[TMP80:%.*]] = zext i32 [[ADD26]] to i64 +// CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP80]]) +// CHECK12-NEXT: [[TMP81:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l110.region_id, i32 3, i8** [[TMP75]], i8** [[TMP76]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.5, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.6, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK12-NEXT: [[TMP82:%.*]] = icmp ne i32 [[TMP81]], 0 +// CHECK12-NEXT: br i1 [[TMP82]], label [[OMP_OFFLOAD_FAILED27:%.*]], label [[OMP_OFFLOAD_CONT28:%.*]] +// CHECK12: omp_offload.failed27: +// CHECK12-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l110(i32 [[TMP57]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] +// CHECK12-NEXT: br label [[OMP_OFFLOAD_CONT28]] +// CHECK12: omp_offload.cont28: +// CHECK12-NEXT: [[TMP83:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 +// CHECK12-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP83]]) // CHECK12-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK12-NEXT: [[TMP96:%.*]] = 
load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK12-NEXT: call void @llvm.stackrestore(i8* [[TMP96]]) -// CHECK12-NEXT: [[TMP97:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK12-NEXT: ret i32 [[TMP97]] +// CHECK12-NEXT: [[TMP84:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK12-NEXT: call void @llvm.stackrestore(i8* [[TMP84]]) +// CHECK12-NEXT: [[TMP85:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK12-NEXT: ret i32 [[TMP85]] // // // CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l100 @@ -4692,11 +4608,11 @@ // CHECK12-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 // CHECK12-NEXT: store i32 [[TMP3]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK12-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32, i32*, i32)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP4]]) +// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32, i32*, i32)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP4]]) // CHECK12-NEXT: ret void // // -// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..3 +// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..4 // CHECK12-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK12-NEXT: entry: // CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -4839,7 +4755,7 @@ // CHECK12-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK12-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK12-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l79.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.6, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK12-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l79.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK12-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK12-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK12: omp_offload.failed: @@ -4857,7 +4773,7 @@ // CHECK12-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0 // CHECK12-NEXT: [[TMP15:%.*]] = 
getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0 // CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK12-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l84.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK12-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l84.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.11, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK12-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 // CHECK12-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]] // CHECK12: omp_offload.failed5: @@ -4875,7 +4791,7 @@ // CHECK12-NEXT: [[TMP23:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0 // CHECK12-NEXT: [[TMP24:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0 // CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK12-NEXT: [[TMP25:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l89.region_id, i32 1, i8** [[TMP23]], i8** [[TMP24]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.11, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK12-NEXT: [[TMP25:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l89.region_id, i32 1, i8** [[TMP23]], i8** [[TMP24]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.14, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.15, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK12-NEXT: [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0 // CHECK12-NEXT: br i1 [[TMP26]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]] // CHECK12: omp_offload.failed11: @@ -4891,11 +4807,11 @@ // CHECK12-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK12-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 // CHECK12-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 -// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..5 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..7 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK12-NEXT: ret void // // -// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..5 +// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..7 // CHECK12-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK12-NEXT: entry: // CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -4967,11 +4883,11 @@ // CHECK12-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK12-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 // CHECK12-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 -// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..7 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK12-NEXT: ret void // // -// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..7 +// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..10 // CHECK12-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK12-NEXT: entry: // CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -5043,11 +4959,11 @@ // CHECK12-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK12-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 // CHECK12-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 -// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..13 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK12-NEXT: ret void // // -// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..10 +// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..13 // CHECK12-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK12-NEXT: entry: // CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 diff --git a/clang/test/OpenMP/teams_distribute_parallel_for_codegen.cpp b/clang/test/OpenMP/teams_distribute_parallel_for_codegen.cpp --- a/clang/test/OpenMP/teams_distribute_parallel_for_codegen.cpp +++ b/clang/test/OpenMP/teams_distribute_parallel_for_codegen.cpp @@ -2445,7 +2445,6 @@ // CHECK9-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK9-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK9-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8 -// CHECK9-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 8 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -2467,56 +2466,50 @@ // CHECK9-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK9-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i64* // CHECK9-NEXT: store i64 [[TMP4]], i64* [[TMP9]], align 8 -// CHECK9-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK9-NEXT: store i64 4, i64* [[TMP10]], align 8 -// CHECK9-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK9-NEXT: store i8* null, i8** [[TMP11]], align 8 -// CHECK9-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK9-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64* -// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP13]], align 8 -// CHECK9-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK9-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64* -// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP15]], align 8 -// CHECK9-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK9-NEXT: store i64 8, i64* [[TMP16]], align 8 -// CHECK9-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK9-NEXT: store i8* null, i8** [[TMP17]], align 8 -// CHECK9-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK9-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK9-NEXT: store i8* null, i8** [[TMP10]], align 8 +// CHECK9-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK9-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i64* +// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP12]], align 8 +// CHECK9-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK9-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i64* +// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP14]], align 8 +// CHECK9-NEXT: 
[[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK9-NEXT: store i8* null, i8** [[TMP15]], align 8 +// CHECK9-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK9-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** +// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 8 +// CHECK9-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK9-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** // CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 8 -// CHECK9-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK9-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** -// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 8 -// CHECK9-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK9-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 8 -// CHECK9-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK9-NEXT: store i8* null, i8** [[TMP23]], align 8 -// CHECK9-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4 -// CHECK9-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK9-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 +// CHECK9-NEXT: store i64 [[TMP5]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 2), align 8 +// CHECK9-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK9-NEXT: store i8* null, i8** [[TMP20]], align 8 +// CHECK9-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP23:%.*]] = load i32, i32* [[N]], align 4 +// CHECK9-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK9-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK9-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK9-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK9-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK9-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1 -// CHECK9-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 -// CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP30]]) -// CHECK9-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z15teams_local_argv_l73.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK9-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 -// 
CHECK9-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK9-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], 1 +// CHECK9-NEXT: [[TMP26:%.*]] = zext i32 [[ADD]] to i64 +// CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP26]]) +// CHECK9-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z15teams_local_argv_l73.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK9-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK9-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK9: omp_offload.failed: // CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z15teams_local_argv_l73(i64 [[TMP4]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK9: omp_offload.cont: // CHECK9-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[VLA]], i64 0 -// CHECK9-NEXT: [[TMP33:%.*]] = load i32, i32* [[ARRAYIDX]], align 4 -// CHECK9-NEXT: [[TMP34:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK9-NEXT: call void @llvm.stackrestore(i8* [[TMP34]]) -// CHECK9-NEXT: ret i32 [[TMP33]] +// CHECK9-NEXT: [[TMP29:%.*]] = load i32, i32* [[ARRAYIDX]], align 4 +// CHECK9-NEXT: [[TMP30:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK9-NEXT: call void @llvm.stackrestore(i8* [[TMP30]]) +// CHECK9-NEXT: ret i32 [[TMP29]] // // // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z15teams_local_argv_l73 @@ -2748,7 +2741,6 @@ // CHECK10-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK10-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK10-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8 -// CHECK10-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 8 // CHECK10-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK10-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK10-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -2770,56 +2762,50 @@ // CHECK10-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK10-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i64* // CHECK10-NEXT: store i64 [[TMP4]], i64* [[TMP9]], align 8 -// CHECK10-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK10-NEXT: store i64 4, i64* [[TMP10]], align 8 -// CHECK10-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK10-NEXT: store i8* null, i8** [[TMP11]], align 8 -// CHECK10-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK10-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64* -// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP13]], align 8 -// CHECK10-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK10-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64* -// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP15]], align 8 -// CHECK10-NEXT: [[TMP16:%.*]] = getelementptr inbounds 
[3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK10-NEXT: store i64 8, i64* [[TMP16]], align 8 -// CHECK10-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK10-NEXT: store i8* null, i8** [[TMP17]], align 8 -// CHECK10-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK10-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK10-NEXT: store i8* null, i8** [[TMP10]], align 8 +// CHECK10-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK10-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i64* +// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP12]], align 8 +// CHECK10-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK10-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i64* +// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP14]], align 8 +// CHECK10-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK10-NEXT: store i8* null, i8** [[TMP15]], align 8 +// CHECK10-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK10-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** +// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 8 +// CHECK10-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK10-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** // CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 8 -// CHECK10-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK10-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** -// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 8 -// CHECK10-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK10-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 8 -// CHECK10-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK10-NEXT: store i8* null, i8** [[TMP23]], align 8 -// CHECK10-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4 -// CHECK10-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK10-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK10-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 +// CHECK10-NEXT: store i64 [[TMP5]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 2), align 8 +// CHECK10-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK10-NEXT: store i8* null, i8** [[TMP20]], align 8 +// CHECK10-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP23:%.*]] = load i32, i32* [[N]], align 4 +// CHECK10-NEXT: store 
i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK10-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK10-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK10-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK10-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK10-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK10-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK10-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1 -// CHECK10-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 -// CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP30]]) -// CHECK10-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z15teams_local_argv_l73.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK10-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 -// CHECK10-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK10-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK10-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], 1 +// CHECK10-NEXT: [[TMP26:%.*]] = zext i32 [[ADD]] to i64 +// CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP26]]) +// CHECK10-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z15teams_local_argv_l73.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK10-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK10-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK10: omp_offload.failed: // CHECK10-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z15teams_local_argv_l73(i64 [[TMP4]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK10-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK10: omp_offload.cont: // CHECK10-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[VLA]], i64 0 -// CHECK10-NEXT: [[TMP33:%.*]] = load i32, i32* [[ARRAYIDX]], align 4 -// CHECK10-NEXT: [[TMP34:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK10-NEXT: call void @llvm.stackrestore(i8* [[TMP34]]) -// CHECK10-NEXT: ret i32 [[TMP33]] +// CHECK10-NEXT: [[TMP29:%.*]] = load i32, i32* [[ARRAYIDX]], align 4 +// CHECK10-NEXT: [[TMP30:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK10-NEXT: call void @llvm.stackrestore(i8* [[TMP30]]) +// CHECK10-NEXT: ret i32 [[TMP29]] // // // CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z15teams_local_argv_l73 @@ -3051,7 +3037,6 @@ // CHECK11-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK11-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK11-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4 -// CHECK11-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 4 // CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ 
-3072,56 +3057,50 @@ // CHECK11-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK11-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i32* // CHECK11-NEXT: store i32 [[TMP3]], i32* [[TMP9]], align 4 -// CHECK11-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK11-NEXT: store i64 4, i64* [[TMP10]], align 4 -// CHECK11-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK11-NEXT: store i8* null, i8** [[TMP11]], align 4 -// CHECK11-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK11-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32* -// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP13]], align 4 -// CHECK11-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK11-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32* -// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP15]], align 4 -// CHECK11-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK11-NEXT: store i64 4, i64* [[TMP16]], align 4 -// CHECK11-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK11-NEXT: store i8* null, i8** [[TMP17]], align 4 -// CHECK11-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK11-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK11-NEXT: store i8* null, i8** [[TMP10]], align 4 +// CHECK11-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK11-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i32* +// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP12]], align 4 +// CHECK11-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK11-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i32* +// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP14]], align 4 +// CHECK11-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK11-NEXT: store i8* null, i8** [[TMP15]], align 4 +// CHECK11-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK11-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** +// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 4 +// CHECK11-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK11-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** // CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 4 -// CHECK11-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK11-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** -// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 4 -// CHECK11-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK11-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 4 -// CHECK11-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK11-NEXT: store i8* null, i8** [[TMP23]], align 4 -// CHECK11-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], 
i32 0, i32 0 -// CHECK11-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4 -// CHECK11-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK11-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 +// CHECK11-NEXT: store i64 [[TMP5]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 2), align 4 +// CHECK11-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK11-NEXT: store i8* null, i8** [[TMP20]], align 4 +// CHECK11-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP23:%.*]] = load i32, i32* [[N]], align 4 +// CHECK11-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK11-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK11-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK11-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK11-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK11-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1 -// CHECK11-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 -// CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP30]]) -// CHECK11-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z15teams_local_argv_l73.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK11-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 -// CHECK11-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK11-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], 1 +// CHECK11-NEXT: [[TMP26:%.*]] = zext i32 [[ADD]] to i64 +// CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP26]]) +// CHECK11-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z15teams_local_argv_l73.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK11-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK11-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK11: omp_offload.failed: // CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z15teams_local_argv_l73(i32 [[TMP3]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK11: omp_offload.cont: // CHECK11-NEXT: [[ARRAYIDX:%.*]] 
= getelementptr inbounds i32, i32* [[VLA]], i32 0 -// CHECK11-NEXT: [[TMP33:%.*]] = load i32, i32* [[ARRAYIDX]], align 4 -// CHECK11-NEXT: [[TMP34:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK11-NEXT: call void @llvm.stackrestore(i8* [[TMP34]]) -// CHECK11-NEXT: ret i32 [[TMP33]] +// CHECK11-NEXT: [[TMP29:%.*]] = load i32, i32* [[ARRAYIDX]], align 4 +// CHECK11-NEXT: [[TMP30:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK11-NEXT: call void @llvm.stackrestore(i8* [[TMP30]]) +// CHECK11-NEXT: ret i32 [[TMP29]] // // // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z15teams_local_argv_l73 @@ -3347,7 +3326,6 @@ // CHECK12-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK12-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK12-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4 -// CHECK12-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 4 // CHECK12-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK12-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK12-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -3368,56 +3346,50 @@ // CHECK12-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK12-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i32* // CHECK12-NEXT: store i32 [[TMP3]], i32* [[TMP9]], align 4 -// CHECK12-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK12-NEXT: store i64 4, i64* [[TMP10]], align 4 -// CHECK12-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK12-NEXT: store i8* null, i8** [[TMP11]], align 4 -// CHECK12-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK12-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32* -// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP13]], align 4 -// CHECK12-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK12-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32* -// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP15]], align 4 -// CHECK12-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK12-NEXT: store i64 4, i64* [[TMP16]], align 4 -// CHECK12-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK12-NEXT: store i8* null, i8** [[TMP17]], align 4 -// CHECK12-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK12-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK12-NEXT: store i8* null, i8** [[TMP10]], align 4 +// CHECK12-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK12-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i32* +// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP12]], align 4 +// CHECK12-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK12-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i32* +// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP14]], align 4 +// CHECK12-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK12-NEXT: store i8* null, i8** [[TMP15]], align 4 +// CHECK12-NEXT: [[TMP16:%.*]] = getelementptr 
inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK12-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** +// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 4 +// CHECK12-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK12-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** // CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 4 -// CHECK12-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK12-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** -// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 4 -// CHECK12-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK12-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 4 -// CHECK12-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK12-NEXT: store i8* null, i8** [[TMP23]], align 4 -// CHECK12-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4 -// CHECK12-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK12-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK12-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 +// CHECK12-NEXT: store i64 [[TMP5]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 2), align 4 +// CHECK12-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK12-NEXT: store i8* null, i8** [[TMP20]], align 4 +// CHECK12-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP23:%.*]] = load i32, i32* [[N]], align 4 +// CHECK12-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK12-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK12-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK12-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK12-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK12-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK12-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK12-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1 -// CHECK12-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 -// CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP30]]) -// CHECK12-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z15teams_local_argv_l73.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK12-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 -// CHECK12-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK12-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], 
align 4 +// CHECK12-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], 1 +// CHECK12-NEXT: [[TMP26:%.*]] = zext i32 [[ADD]] to i64 +// CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP26]]) +// CHECK12-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z15teams_local_argv_l73.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK12-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK12-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK12: omp_offload.failed: // CHECK12-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z15teams_local_argv_l73(i32 [[TMP3]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK12-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK12: omp_offload.cont: // CHECK12-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[VLA]], i32 0 -// CHECK12-NEXT: [[TMP33:%.*]] = load i32, i32* [[ARRAYIDX]], align 4 -// CHECK12-NEXT: [[TMP34:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK12-NEXT: call void @llvm.stackrestore(i8* [[TMP34]]) -// CHECK12-NEXT: ret i32 [[TMP33]] +// CHECK12-NEXT: [[TMP29:%.*]] = load i32, i32* [[ARRAYIDX]], align 4 +// CHECK12-NEXT: [[TMP30:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK12-NEXT: call void @llvm.stackrestore(i8* [[TMP30]]) +// CHECK12-NEXT: ret i32 [[TMP29]] // // // CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z15teams_local_argv_l73 @@ -4440,7 +4412,6 @@ // CHECK25-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK25-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK25-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8 -// CHECK25-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 8 // CHECK25-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK25-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK25-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -4465,47 +4436,41 @@ // CHECK25-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK25-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i64* // CHECK25-NEXT: store i64 [[TMP4]], i64* [[TMP9]], align 8 -// CHECK25-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK25-NEXT: store i64 4, i64* [[TMP10]], align 8 -// CHECK25-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK25-NEXT: store i8* null, i8** [[TMP11]], align 8 -// CHECK25-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK25-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64* -// CHECK25-NEXT: store i64 [[TMP1]], i64* [[TMP13]], align 8 -// CHECK25-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK25-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64* -// CHECK25-NEXT: store i64 [[TMP1]], i64* [[TMP15]], align 8 -// CHECK25-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK25-NEXT: store i64 8, i64* [[TMP16]], align 8 -// CHECK25-NEXT: [[TMP17:%.*]] = 
getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK25-NEXT: store i8* null, i8** [[TMP17]], align 8 -// CHECK25-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK25-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK25-NEXT: store i8* null, i8** [[TMP10]], align 8 +// CHECK25-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK25-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i64* +// CHECK25-NEXT: store i64 [[TMP1]], i64* [[TMP12]], align 8 +// CHECK25-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK25-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i64* +// CHECK25-NEXT: store i64 [[TMP1]], i64* [[TMP14]], align 8 +// CHECK25-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK25-NEXT: store i8* null, i8** [[TMP15]], align 8 +// CHECK25-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK25-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** +// CHECK25-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 8 +// CHECK25-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK25-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** // CHECK25-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 8 -// CHECK25-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK25-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** -// CHECK25-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 8 -// CHECK25-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK25-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 8 -// CHECK25-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK25-NEXT: store i8* null, i8** [[TMP23]], align 8 -// CHECK25-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK25-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK25-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK25-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4 -// CHECK25-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK25-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK25-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 +// CHECK25-NEXT: store i64 [[TMP5]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 2), align 8 +// CHECK25-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK25-NEXT: store i8* null, i8** [[TMP20]], align 8 +// CHECK25-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK25-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK25-NEXT: [[TMP23:%.*]] = load i32, i32* [[N]], align 4 +// CHECK25-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK25-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK25-NEXT: 
[[SUB:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK25-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK25-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK25-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK25-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK25-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1 -// CHECK25-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 -// CHECK25-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP30]]) -// CHECK25-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l162.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK25-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 -// CHECK25-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK25-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK25-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], 1 +// CHECK25-NEXT: [[TMP26:%.*]] = zext i32 [[ADD]] to i64 +// CHECK25-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP26]]) +// CHECK25-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l162.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK25-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK25-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK25: omp_offload.failed: // CHECK25-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l162(i64 [[TMP4]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK25-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -4513,10 +4478,10 @@ // CHECK25-NEXT: [[TMP33:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 // CHECK25-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP33]]) // CHECK25-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK25-NEXT: [[TMP34:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK25-NEXT: call void @llvm.stackrestore(i8* [[TMP34]]) -// CHECK25-NEXT: [[TMP35:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK25-NEXT: ret i32 [[TMP35]] +// CHECK25-NEXT: [[TMP30:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK25-NEXT: call void @llvm.stackrestore(i8* [[TMP30]]) +// CHECK25-NEXT: [[TMP31:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK25-NEXT: ret i32 [[TMP31]] // // // CHECK25-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l162 @@ -4783,7 +4748,7 @@ // CHECK25-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK25-NEXT: [[TMP21:%.*]] = load i32, i32* [[TE]], align 4 // CHECK25-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK25-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l151.region_id, i32 3, i8** [[TMP19]], i8** [[TMP20]], i64* 
getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 [[TMP21]], i32 0) +// CHECK25-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l151.region_id, i32 3, i8** [[TMP19]], i8** [[TMP20]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 [[TMP21]], i32 0) // CHECK25-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0 // CHECK25-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK25: omp_offload.failed: @@ -4973,7 +4938,6 @@ // CHECK26-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK26-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK26-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8 -// CHECK26-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 8 // CHECK26-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK26-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK26-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -4998,47 +4962,41 @@ // CHECK26-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK26-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i64* // CHECK26-NEXT: store i64 [[TMP4]], i64* [[TMP9]], align 8 -// CHECK26-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK26-NEXT: store i64 4, i64* [[TMP10]], align 8 -// CHECK26-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK26-NEXT: store i8* null, i8** [[TMP11]], align 8 -// CHECK26-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK26-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64* -// CHECK26-NEXT: store i64 [[TMP1]], i64* [[TMP13]], align 8 -// CHECK26-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK26-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64* -// CHECK26-NEXT: store i64 [[TMP1]], i64* [[TMP15]], align 8 -// CHECK26-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK26-NEXT: store i64 8, i64* [[TMP16]], align 8 -// CHECK26-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK26-NEXT: store i8* null, i8** [[TMP17]], align 8 -// CHECK26-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK26-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK26-NEXT: store i8* null, i8** [[TMP10]], align 8 +// CHECK26-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK26-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i64* +// CHECK26-NEXT: store i64 [[TMP1]], i64* [[TMP12]], align 8 +// CHECK26-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK26-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i64* +// CHECK26-NEXT: store i64 [[TMP1]], i64* [[TMP14]], align 8 +// CHECK26-NEXT: 
[[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK26-NEXT: store i8* null, i8** [[TMP15]], align 8 +// CHECK26-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK26-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** +// CHECK26-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 8 +// CHECK26-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK26-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** // CHECK26-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 8 -// CHECK26-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK26-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** -// CHECK26-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 8 -// CHECK26-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK26-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 8 -// CHECK26-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK26-NEXT: store i8* null, i8** [[TMP23]], align 8 -// CHECK26-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK26-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK26-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK26-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4 -// CHECK26-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK26-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK26-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 +// CHECK26-NEXT: store i64 [[TMP5]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 2), align 8 +// CHECK26-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK26-NEXT: store i8* null, i8** [[TMP20]], align 8 +// CHECK26-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK26-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK26-NEXT: [[TMP23:%.*]] = load i32, i32* [[N]], align 4 +// CHECK26-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK26-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK26-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK26-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK26-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK26-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK26-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK26-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1 -// CHECK26-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 -// CHECK26-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP30]]) -// CHECK26-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l162.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK26-NEXT: [[TMP32:%.*]] = icmp ne i32 
[[TMP31]], 0 -// CHECK26-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK26-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK26-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], 1 +// CHECK26-NEXT: [[TMP26:%.*]] = zext i32 [[ADD]] to i64 +// CHECK26-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP26]]) +// CHECK26-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l162.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK26-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK26-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK26: omp_offload.failed: // CHECK26-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l162(i64 [[TMP4]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK26-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -5046,10 +5004,10 @@ // CHECK26-NEXT: [[TMP33:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 // CHECK26-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP33]]) // CHECK26-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK26-NEXT: [[TMP34:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK26-NEXT: call void @llvm.stackrestore(i8* [[TMP34]]) -// CHECK26-NEXT: [[TMP35:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK26-NEXT: ret i32 [[TMP35]] +// CHECK26-NEXT: [[TMP30:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK26-NEXT: call void @llvm.stackrestore(i8* [[TMP30]]) +// CHECK26-NEXT: [[TMP31:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK26-NEXT: ret i32 [[TMP31]] // // // CHECK26-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l162 @@ -5316,7 +5274,7 @@ // CHECK26-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK26-NEXT: [[TMP21:%.*]] = load i32, i32* [[TE]], align 4 // CHECK26-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK26-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l151.region_id, i32 3, i8** [[TMP19]], i8** [[TMP20]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 [[TMP21]], i32 0) +// CHECK26-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l151.region_id, i32 3, i8** [[TMP19]], i8** [[TMP20]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 [[TMP21]], i32 0) // CHECK26-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0 // CHECK26-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK26: omp_offload.failed: @@ -5506,7 +5464,6 @@ // CHECK27-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4 // 
CHECK27-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK27-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4 -// CHECK27-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 4 // CHECK27-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK27-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK27-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -5530,47 +5487,41 @@ // CHECK27-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK27-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i32* // CHECK27-NEXT: store i32 [[TMP3]], i32* [[TMP9]], align 4 -// CHECK27-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK27-NEXT: store i64 4, i64* [[TMP10]], align 4 -// CHECK27-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK27-NEXT: store i8* null, i8** [[TMP11]], align 4 -// CHECK27-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK27-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32* -// CHECK27-NEXT: store i32 [[TMP0]], i32* [[TMP13]], align 4 -// CHECK27-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK27-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32* -// CHECK27-NEXT: store i32 [[TMP0]], i32* [[TMP15]], align 4 -// CHECK27-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK27-NEXT: store i64 4, i64* [[TMP16]], align 4 -// CHECK27-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK27-NEXT: store i8* null, i8** [[TMP17]], align 4 -// CHECK27-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK27-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK27-NEXT: store i8* null, i8** [[TMP10]], align 4 +// CHECK27-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK27-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i32* +// CHECK27-NEXT: store i32 [[TMP0]], i32* [[TMP12]], align 4 +// CHECK27-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK27-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i32* +// CHECK27-NEXT: store i32 [[TMP0]], i32* [[TMP14]], align 4 +// CHECK27-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK27-NEXT: store i8* null, i8** [[TMP15]], align 4 +// CHECK27-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK27-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** +// CHECK27-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 4 +// CHECK27-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK27-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** // CHECK27-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 4 -// CHECK27-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK27-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** -// CHECK27-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 4 -// CHECK27-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x 
i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK27-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 4 -// CHECK27-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK27-NEXT: store i8* null, i8** [[TMP23]], align 4 -// CHECK27-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK27-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK27-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK27-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4 -// CHECK27-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK27-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK27-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 +// CHECK27-NEXT: store i64 [[TMP5]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 2), align 4 +// CHECK27-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK27-NEXT: store i8* null, i8** [[TMP20]], align 4 +// CHECK27-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK27-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK27-NEXT: [[TMP23:%.*]] = load i32, i32* [[N]], align 4 +// CHECK27-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK27-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK27-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK27-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK27-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK27-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK27-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK27-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1 -// CHECK27-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 -// CHECK27-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP30]]) -// CHECK27-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l162.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK27-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 -// CHECK27-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK27-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK27-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], 1 +// CHECK27-NEXT: [[TMP26:%.*]] = zext i32 [[ADD]] to i64 +// CHECK27-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP26]]) +// CHECK27-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l162.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK27-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK27-NEXT: br i1 [[TMP28]], 
label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK27: omp_offload.failed: // CHECK27-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l162(i32 [[TMP3]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK27-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -5578,10 +5529,10 @@ // CHECK27-NEXT: [[TMP33:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 // CHECK27-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP33]]) // CHECK27-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK27-NEXT: [[TMP34:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK27-NEXT: call void @llvm.stackrestore(i8* [[TMP34]]) -// CHECK27-NEXT: [[TMP35:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK27-NEXT: ret i32 [[TMP35]] +// CHECK27-NEXT: [[TMP30:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK27-NEXT: call void @llvm.stackrestore(i8* [[TMP30]]) +// CHECK27-NEXT: [[TMP31:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK27-NEXT: ret i32 [[TMP31]] // // // CHECK27-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l162 @@ -5840,7 +5791,7 @@ // CHECK27-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK27-NEXT: [[TMP21:%.*]] = load i32, i32* [[TE]], align 4 // CHECK27-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK27-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l151.region_id, i32 3, i8** [[TMP19]], i8** [[TMP20]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 [[TMP21]], i32 0) +// CHECK27-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l151.region_id, i32 3, i8** [[TMP19]], i8** [[TMP20]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 [[TMP21]], i32 0) // CHECK27-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0 // CHECK27-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK27: omp_offload.failed: @@ -6023,7 +5974,6 @@ // CHECK28-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK28-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK28-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4 -// CHECK28-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 4 // CHECK28-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK28-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK28-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -6047,47 +5997,41 @@ // CHECK28-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK28-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i32* // CHECK28-NEXT: store i32 [[TMP3]], i32* [[TMP9]], align 4 -// CHECK28-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK28-NEXT: store i64 4, i64* [[TMP10]], align 4 -// CHECK28-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK28-NEXT: store i8* 
null, i8** [[TMP11]], align 4 -// CHECK28-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK28-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32* -// CHECK28-NEXT: store i32 [[TMP0]], i32* [[TMP13]], align 4 -// CHECK28-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK28-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32* -// CHECK28-NEXT: store i32 [[TMP0]], i32* [[TMP15]], align 4 -// CHECK28-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK28-NEXT: store i64 4, i64* [[TMP16]], align 4 -// CHECK28-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK28-NEXT: store i8* null, i8** [[TMP17]], align 4 -// CHECK28-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK28-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK28-NEXT: store i8* null, i8** [[TMP10]], align 4 +// CHECK28-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK28-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i32* +// CHECK28-NEXT: store i32 [[TMP0]], i32* [[TMP12]], align 4 +// CHECK28-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK28-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i32* +// CHECK28-NEXT: store i32 [[TMP0]], i32* [[TMP14]], align 4 +// CHECK28-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK28-NEXT: store i8* null, i8** [[TMP15]], align 4 +// CHECK28-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK28-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** +// CHECK28-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 4 +// CHECK28-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK28-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** // CHECK28-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 4 -// CHECK28-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK28-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** -// CHECK28-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 4 -// CHECK28-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK28-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 4 -// CHECK28-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK28-NEXT: store i8* null, i8** [[TMP23]], align 4 -// CHECK28-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK28-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK28-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK28-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4 -// CHECK28-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK28-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK28-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 +// CHECK28-NEXT: store i64 [[TMP5]], i64* 
getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 2), align 4 +// CHECK28-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK28-NEXT: store i8* null, i8** [[TMP20]], align 4 +// CHECK28-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK28-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK28-NEXT: [[TMP23:%.*]] = load i32, i32* [[N]], align 4 +// CHECK28-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK28-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK28-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK28-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK28-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK28-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK28-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK28-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1 -// CHECK28-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 -// CHECK28-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP30]]) -// CHECK28-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l162.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK28-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 -// CHECK28-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK28-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK28-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], 1 +// CHECK28-NEXT: [[TMP26:%.*]] = zext i32 [[ADD]] to i64 +// CHECK28-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP26]]) +// CHECK28-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l162.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK28-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK28-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK28: omp_offload.failed: // CHECK28-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l162(i32 [[TMP3]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK28-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -6095,10 +6039,10 @@ // CHECK28-NEXT: [[TMP33:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 // CHECK28-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP33]]) // CHECK28-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK28-NEXT: [[TMP34:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK28-NEXT: call void @llvm.stackrestore(i8* [[TMP34]]) -// CHECK28-NEXT: [[TMP35:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK28-NEXT: ret i32 [[TMP35]] +// CHECK28-NEXT: [[TMP30:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK28-NEXT: call void @llvm.stackrestore(i8* [[TMP30]]) +// CHECK28-NEXT: [[TMP31:%.*]] = load 
i32, i32* [[RETVAL]], align 4 +// CHECK28-NEXT: ret i32 [[TMP31]] // // // CHECK28-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l162 @@ -6357,7 +6301,7 @@ // CHECK28-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK28-NEXT: [[TMP21:%.*]] = load i32, i32* [[TE]], align 4 // CHECK28-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK28-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l151.region_id, i32 3, i8** [[TMP19]], i8** [[TMP20]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 [[TMP21]], i32 0) +// CHECK28-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l151.region_id, i32 3, i8** [[TMP19]], i8** [[TMP20]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 [[TMP21]], i32 0) // CHECK28-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0 // CHECK28-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK28: omp_offload.failed: diff --git a/clang/test/OpenMP/teams_distribute_parallel_for_collapse_codegen.cpp b/clang/test/OpenMP/teams_distribute_parallel_for_collapse_codegen.cpp --- a/clang/test/OpenMP/teams_distribute_parallel_for_collapse_codegen.cpp +++ b/clang/test/OpenMP/teams_distribute_parallel_for_collapse_codegen.cpp @@ -982,7 +982,6 @@ // CHECK9-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 8 // CHECK9-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 8 // CHECK9-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 8 -// CHECK9-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 8 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK9-NEXT: [[_TMP2:%.*]] = alloca i32, align 4 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 @@ -1019,74 +1018,64 @@ // CHECK9-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK9-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64* // CHECK9-NEXT: store i64 [[TMP7]], i64* [[TMP15]], align 8 -// CHECK9-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK9-NEXT: store i64 4, i64* [[TMP16]], align 8 -// CHECK9-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK9-NEXT: store i8* null, i8** [[TMP17]], align 8 -// CHECK9-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK9-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i64* -// CHECK9-NEXT: store i64 [[TMP9]], i64* [[TMP19]], align 8 -// CHECK9-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK9-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i64* -// CHECK9-NEXT: store i64 [[TMP9]], i64* [[TMP21]], align 8 -// CHECK9-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK9-NEXT: store i64 4, i64* [[TMP22]], align 8 
-// CHECK9-NEXT: [[TMP23:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK9-NEXT: store i8* null, i8** [[TMP23]], align 8 -// CHECK9-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK9-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK9-NEXT: store i8* null, i8** [[TMP16]], align 8 +// CHECK9-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK9-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i64* +// CHECK9-NEXT: store i64 [[TMP9]], i64* [[TMP18]], align 8 +// CHECK9-NEXT: [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK9-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i64* +// CHECK9-NEXT: store i64 [[TMP9]], i64* [[TMP20]], align 8 +// CHECK9-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK9-NEXT: store i8* null, i8** [[TMP21]], align 8 +// CHECK9-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK9-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i64* +// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP23]], align 8 +// CHECK9-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK9-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i64* // CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP25]], align 8 -// CHECK9-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK9-NEXT: [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i64* -// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP27]], align 8 -// CHECK9-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK9-NEXT: store i64 8, i64* [[TMP28]], align 8 -// CHECK9-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK9-NEXT: store i8* null, i8** [[TMP29]], align 8 -// CHECK9-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK9-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i64* -// CHECK9-NEXT: store i64 [[TMP3]], i64* [[TMP31]], align 8 -// CHECK9-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK9-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i64* -// CHECK9-NEXT: store i64 [[TMP3]], i64* [[TMP33]], align 8 -// CHECK9-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK9-NEXT: store i64 8, i64* [[TMP34]], align 8 -// CHECK9-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 -// CHECK9-NEXT: store i8* null, i8** [[TMP35]], align 8 -// CHECK9-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK9-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i32** -// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP37]], align 8 -// CHECK9-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK9-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i32** -// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP39]], align 8 -// CHECK9-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* 
[[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK9-NEXT: store i64 [[TMP11]], i64* [[TMP40]], align 8 -// CHECK9-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 -// CHECK9-NEXT: store i8* null, i8** [[TMP41]], align 8 -// CHECK9-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP43:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP44:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP45:%.*]] = load i32, i32* [[N]], align 4 -// CHECK9-NEXT: store i32 [[TMP45]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK9-NEXT: [[TMP46:%.*]] = load i32, i32* [[M]], align 4 -// CHECK9-NEXT: store i32 [[TMP46]], i32* [[DOTCAPTURE_EXPR_3]], align 4 -// CHECK9-NEXT: [[TMP47:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP47]], 0 +// CHECK9-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK9-NEXT: store i8* null, i8** [[TMP26]], align 8 +// CHECK9-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK9-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i64* +// CHECK9-NEXT: store i64 [[TMP3]], i64* [[TMP28]], align 8 +// CHECK9-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK9-NEXT: [[TMP30:%.*]] = bitcast i8** [[TMP29]] to i64* +// CHECK9-NEXT: store i64 [[TMP3]], i64* [[TMP30]], align 8 +// CHECK9-NEXT: [[TMP31:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 +// CHECK9-NEXT: store i8* null, i8** [[TMP31]], align 8 +// CHECK9-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK9-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i32** +// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP33]], align 8 +// CHECK9-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK9-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i32** +// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP35]], align 8 +// CHECK9-NEXT: store i64 [[TMP11]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes, i32 0, i32 4), align 8 +// CHECK9-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 +// CHECK9-NEXT: store i8* null, i8** [[TMP36]], align 8 +// CHECK9-NEXT: [[TMP37:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP39:%.*]] = load i32, i32* [[N]], align 4 +// CHECK9-NEXT: store i32 [[TMP39]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK9-NEXT: [[TMP40:%.*]] = load i32, i32* [[M]], align 4 +// CHECK9-NEXT: store i32 [[TMP40]], i32* [[DOTCAPTURE_EXPR_3]], align 4 +// CHECK9-NEXT: [[TMP41:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP41]], 0 // CHECK9-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK9-NEXT: [[CONV5:%.*]] = sext i32 [[DIV]] to i64 -// CHECK9-NEXT: [[TMP48:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4 -// CHECK9-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP48]], 0 +// CHECK9-NEXT: [[TMP42:%.*]] = load i32, i32* 
[[DOTCAPTURE_EXPR_3]], align 4 +// CHECK9-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP42]], 0 // CHECK9-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1 // CHECK9-NEXT: [[CONV8:%.*]] = sext i32 [[DIV7]] to i64 // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i64 [[CONV5]], [[CONV8]] // CHECK9-NEXT: [[SUB9:%.*]] = sub nsw i64 [[MUL]], 1 // CHECK9-NEXT: store i64 [[SUB9]], i64* [[DOTCAPTURE_EXPR_4]], align 8 -// CHECK9-NEXT: [[TMP49:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_4]], align 8 -// CHECK9-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP49]], 1 +// CHECK9-NEXT: [[TMP43:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_4]], align 8 +// CHECK9-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP43]], 1 // CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[ADD]]) -// CHECK9-NEXT: [[TMP50:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l83.region_id, i32 5, i8** [[TMP42]], i8** [[TMP43]], i64* [[TMP44]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK9-NEXT: [[TMP51:%.*]] = icmp ne i32 [[TMP50]], 0 -// CHECK9-NEXT: br i1 [[TMP51]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK9-NEXT: [[TMP44:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l83.region_id, i32 5, i8** [[TMP37]], i8** [[TMP38]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK9-NEXT: [[TMP45:%.*]] = icmp ne i32 [[TMP44]], 0 +// CHECK9-NEXT: br i1 [[TMP45]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK9: omp_offload.failed: // CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l83(i64 [[TMP7]], i64 [[TMP9]], i64 [[TMP1]], i64 [[TMP3]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -1094,10 +1083,10 @@ // CHECK9-NEXT: [[TMP52:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 // CHECK9-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10ELi2EEiT_(i32 noundef signext [[TMP52]]) // CHECK9-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK9-NEXT: [[TMP53:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK9-NEXT: call void @llvm.stackrestore(i8* [[TMP53]]) -// CHECK9-NEXT: [[TMP54:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK9-NEXT: ret i32 [[TMP54]] +// CHECK9-NEXT: [[TMP47:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK9-NEXT: call void @llvm.stackrestore(i8* [[TMP47]]) +// CHECK9-NEXT: [[TMP48:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK9-NEXT: ret i32 [[TMP48]] // // // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l83 @@ -1412,7 +1401,7 @@ // CHECK9-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK9-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 20) -// CHECK9-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l69.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 
x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK9-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l69.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK9-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK9-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK9: omp_offload.failed: @@ -1611,7 +1600,6 @@ // CHECK10-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 8 // CHECK10-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 8 // CHECK10-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 8 -// CHECK10-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 8 // CHECK10-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK10-NEXT: [[_TMP2:%.*]] = alloca i32, align 4 // CHECK10-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 @@ -1648,74 +1636,64 @@ // CHECK10-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK10-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64* // CHECK10-NEXT: store i64 [[TMP7]], i64* [[TMP15]], align 8 -// CHECK10-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK10-NEXT: store i64 4, i64* [[TMP16]], align 8 -// CHECK10-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK10-NEXT: store i8* null, i8** [[TMP17]], align 8 -// CHECK10-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK10-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i64* -// CHECK10-NEXT: store i64 [[TMP9]], i64* [[TMP19]], align 8 -// CHECK10-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK10-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i64* -// CHECK10-NEXT: store i64 [[TMP9]], i64* [[TMP21]], align 8 -// CHECK10-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK10-NEXT: store i64 4, i64* [[TMP22]], align 8 -// CHECK10-NEXT: [[TMP23:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK10-NEXT: store i8* null, i8** [[TMP23]], align 8 -// CHECK10-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK10-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK10-NEXT: store i8* null, i8** [[TMP16]], align 8 +// CHECK10-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK10-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i64* +// CHECK10-NEXT: store i64 [[TMP9]], i64* [[TMP18]], align 8 +// CHECK10-NEXT: [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK10-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i64* +// CHECK10-NEXT: store i64 [[TMP9]], i64* [[TMP20]], align 8 +// CHECK10-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* 
[[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK10-NEXT: store i8* null, i8** [[TMP21]], align 8 +// CHECK10-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK10-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i64* +// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP23]], align 8 +// CHECK10-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK10-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i64* // CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP25]], align 8 -// CHECK10-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK10-NEXT: [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i64* -// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP27]], align 8 -// CHECK10-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK10-NEXT: store i64 8, i64* [[TMP28]], align 8 -// CHECK10-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK10-NEXT: store i8* null, i8** [[TMP29]], align 8 -// CHECK10-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK10-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i64* -// CHECK10-NEXT: store i64 [[TMP3]], i64* [[TMP31]], align 8 -// CHECK10-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK10-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i64* -// CHECK10-NEXT: store i64 [[TMP3]], i64* [[TMP33]], align 8 -// CHECK10-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK10-NEXT: store i64 8, i64* [[TMP34]], align 8 -// CHECK10-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 -// CHECK10-NEXT: store i8* null, i8** [[TMP35]], align 8 -// CHECK10-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK10-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i32** -// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP37]], align 8 -// CHECK10-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK10-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i32** -// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP39]], align 8 -// CHECK10-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK10-NEXT: store i64 [[TMP11]], i64* [[TMP40]], align 8 -// CHECK10-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 -// CHECK10-NEXT: store i8* null, i8** [[TMP41]], align 8 -// CHECK10-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP43:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP44:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP45:%.*]] = load i32, i32* [[N]], align 4 -// CHECK10-NEXT: store i32 [[TMP45]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK10-NEXT: [[TMP46:%.*]] = load i32, i32* [[M]], align 4 -// CHECK10-NEXT: store i32 [[TMP46]], i32* [[DOTCAPTURE_EXPR_3]], align 4 -// CHECK10-NEXT: [[TMP47:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK10-NEXT: 
[[SUB:%.*]] = sub nsw i32 [[TMP47]], 0 +// CHECK10-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK10-NEXT: store i8* null, i8** [[TMP26]], align 8 +// CHECK10-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK10-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i64* +// CHECK10-NEXT: store i64 [[TMP3]], i64* [[TMP28]], align 8 +// CHECK10-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK10-NEXT: [[TMP30:%.*]] = bitcast i8** [[TMP29]] to i64* +// CHECK10-NEXT: store i64 [[TMP3]], i64* [[TMP30]], align 8 +// CHECK10-NEXT: [[TMP31:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 +// CHECK10-NEXT: store i8* null, i8** [[TMP31]], align 8 +// CHECK10-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK10-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i32** +// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP33]], align 8 +// CHECK10-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK10-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i32** +// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP35]], align 8 +// CHECK10-NEXT: store i64 [[TMP11]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes, i32 0, i32 4), align 8 +// CHECK10-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 +// CHECK10-NEXT: store i8* null, i8** [[TMP36]], align 8 +// CHECK10-NEXT: [[TMP37:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP39:%.*]] = load i32, i32* [[N]], align 4 +// CHECK10-NEXT: store i32 [[TMP39]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK10-NEXT: [[TMP40:%.*]] = load i32, i32* [[M]], align 4 +// CHECK10-NEXT: store i32 [[TMP40]], i32* [[DOTCAPTURE_EXPR_3]], align 4 +// CHECK10-NEXT: [[TMP41:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK10-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP41]], 0 // CHECK10-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK10-NEXT: [[CONV5:%.*]] = sext i32 [[DIV]] to i64 -// CHECK10-NEXT: [[TMP48:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4 -// CHECK10-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP48]], 0 +// CHECK10-NEXT: [[TMP42:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4 +// CHECK10-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP42]], 0 // CHECK10-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1 // CHECK10-NEXT: [[CONV8:%.*]] = sext i32 [[DIV7]] to i64 // CHECK10-NEXT: [[MUL:%.*]] = mul nsw i64 [[CONV5]], [[CONV8]] // CHECK10-NEXT: [[SUB9:%.*]] = sub nsw i64 [[MUL]], 1 // CHECK10-NEXT: store i64 [[SUB9]], i64* [[DOTCAPTURE_EXPR_4]], align 8 -// CHECK10-NEXT: [[TMP49:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_4]], align 8 -// CHECK10-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP49]], 1 +// CHECK10-NEXT: [[TMP43:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_4]], align 8 +// CHECK10-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP43]], 1 // CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[ADD]]) -// CHECK10-NEXT: [[TMP50:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* 
@.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l83.region_id, i32 5, i8** [[TMP42]], i8** [[TMP43]], i64* [[TMP44]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK10-NEXT: [[TMP51:%.*]] = icmp ne i32 [[TMP50]], 0 -// CHECK10-NEXT: br i1 [[TMP51]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK10-NEXT: [[TMP44:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l83.region_id, i32 5, i8** [[TMP37]], i8** [[TMP38]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK10-NEXT: [[TMP45:%.*]] = icmp ne i32 [[TMP44]], 0 +// CHECK10-NEXT: br i1 [[TMP45]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK10: omp_offload.failed: // CHECK10-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l83(i64 [[TMP7]], i64 [[TMP9]], i64 [[TMP1]], i64 [[TMP3]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK10-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -1723,10 +1701,10 @@ // CHECK10-NEXT: [[TMP52:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 // CHECK10-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10ELi2EEiT_(i32 noundef signext [[TMP52]]) // CHECK10-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK10-NEXT: [[TMP53:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK10-NEXT: call void @llvm.stackrestore(i8* [[TMP53]]) -// CHECK10-NEXT: [[TMP54:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK10-NEXT: ret i32 [[TMP54]] +// CHECK10-NEXT: [[TMP47:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK10-NEXT: call void @llvm.stackrestore(i8* [[TMP47]]) +// CHECK10-NEXT: [[TMP48:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK10-NEXT: ret i32 [[TMP48]] // // // CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l83 @@ -2041,7 +2019,7 @@ // CHECK10-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK10-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 20) -// CHECK10-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l69.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK10-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l69.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK10-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK10-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK10: omp_offload.failed: @@ -2240,7 +2218,6 @@ // CHECK11-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = 
alloca [5 x i8*], align 4 // CHECK11-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 4 // CHECK11-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 4 -// CHECK11-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 4 // CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK11-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 @@ -2274,74 +2251,64 @@ // CHECK11-NEXT: [[TMP13:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK11-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i32* // CHECK11-NEXT: store i32 [[TMP5]], i32* [[TMP14]], align 4 -// CHECK11-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK11-NEXT: store i64 4, i64* [[TMP15]], align 4 -// CHECK11-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK11-NEXT: store i8* null, i8** [[TMP16]], align 4 -// CHECK11-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK11-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32* -// CHECK11-NEXT: store i32 [[TMP7]], i32* [[TMP18]], align 4 -// CHECK11-NEXT: [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK11-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i32* -// CHECK11-NEXT: store i32 [[TMP7]], i32* [[TMP20]], align 4 -// CHECK11-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK11-NEXT: store i64 4, i64* [[TMP21]], align 4 -// CHECK11-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK11-NEXT: store i8* null, i8** [[TMP22]], align 4 -// CHECK11-NEXT: [[TMP23:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK11-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK11-NEXT: store i8* null, i8** [[TMP15]], align 4 +// CHECK11-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK11-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32* +// CHECK11-NEXT: store i32 [[TMP7]], i32* [[TMP17]], align 4 +// CHECK11-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK11-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32* +// CHECK11-NEXT: store i32 [[TMP7]], i32* [[TMP19]], align 4 +// CHECK11-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK11-NEXT: store i8* null, i8** [[TMP20]], align 4 +// CHECK11-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK11-NEXT: [[TMP22:%.*]] = bitcast i8** [[TMP21]] to i32* +// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP22]], align 4 +// CHECK11-NEXT: [[TMP23:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK11-NEXT: [[TMP24:%.*]] = bitcast i8** [[TMP23]] to i32* // CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP24]], align 4 -// CHECK11-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK11-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i32* -// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP26]], align 4 -// CHECK11-NEXT: [[TMP27:%.*]] = 
getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK11-NEXT: store i64 4, i64* [[TMP27]], align 4 -// CHECK11-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK11-NEXT: store i8* null, i8** [[TMP28]], align 4 -// CHECK11-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK11-NEXT: [[TMP30:%.*]] = bitcast i8** [[TMP29]] to i32* -// CHECK11-NEXT: store i32 [[TMP1]], i32* [[TMP30]], align 4 -// CHECK11-NEXT: [[TMP31:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK11-NEXT: [[TMP32:%.*]] = bitcast i8** [[TMP31]] to i32* -// CHECK11-NEXT: store i32 [[TMP1]], i32* [[TMP32]], align 4 -// CHECK11-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK11-NEXT: store i64 4, i64* [[TMP33]], align 4 -// CHECK11-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 -// CHECK11-NEXT: store i8* null, i8** [[TMP34]], align 4 -// CHECK11-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK11-NEXT: [[TMP36:%.*]] = bitcast i8** [[TMP35]] to i32** -// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP36]], align 4 -// CHECK11-NEXT: [[TMP37:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK11-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i32** -// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP38]], align 4 -// CHECK11-NEXT: [[TMP39:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK11-NEXT: store i64 [[TMP10]], i64* [[TMP39]], align 4 -// CHECK11-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 -// CHECK11-NEXT: store i8* null, i8** [[TMP40]], align 4 -// CHECK11-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP43:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP44:%.*]] = load i32, i32* [[N]], align 4 -// CHECK11-NEXT: store i32 [[TMP44]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK11-NEXT: [[TMP45:%.*]] = load i32, i32* [[M]], align 4 -// CHECK11-NEXT: store i32 [[TMP45]], i32* [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK11-NEXT: [[TMP46:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP46]], 0 +// CHECK11-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK11-NEXT: store i8* null, i8** [[TMP25]], align 4 +// CHECK11-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK11-NEXT: [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i32* +// CHECK11-NEXT: store i32 [[TMP1]], i32* [[TMP27]], align 4 +// CHECK11-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK11-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i32* +// CHECK11-NEXT: store i32 [[TMP1]], i32* [[TMP29]], align 4 +// CHECK11-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 +// CHECK11-NEXT: store i8* null, i8** 
[[TMP30]], align 4 +// CHECK11-NEXT: [[TMP31:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK11-NEXT: [[TMP32:%.*]] = bitcast i8** [[TMP31]] to i32** +// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP32]], align 4 +// CHECK11-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK11-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i32** +// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP34]], align 4 +// CHECK11-NEXT: store i64 [[TMP10]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes, i32 0, i32 4), align 4 +// CHECK11-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 +// CHECK11-NEXT: store i8* null, i8** [[TMP35]], align 4 +// CHECK11-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP37:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP38:%.*]] = load i32, i32* [[N]], align 4 +// CHECK11-NEXT: store i32 [[TMP38]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK11-NEXT: [[TMP39:%.*]] = load i32, i32* [[M]], align 4 +// CHECK11-NEXT: store i32 [[TMP39]], i32* [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK11-NEXT: [[TMP40:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP40]], 0 // CHECK11-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK11-NEXT: [[CONV:%.*]] = sext i32 [[DIV]] to i64 -// CHECK11-NEXT: [[TMP47:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK11-NEXT: [[SUB4:%.*]] = sub nsw i32 [[TMP47]], 0 +// CHECK11-NEXT: [[TMP41:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK11-NEXT: [[SUB4:%.*]] = sub nsw i32 [[TMP41]], 0 // CHECK11-NEXT: [[DIV5:%.*]] = sdiv i32 [[SUB4]], 1 // CHECK11-NEXT: [[CONV6:%.*]] = sext i32 [[DIV5]] to i64 // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i64 [[CONV]], [[CONV6]] // CHECK11-NEXT: [[SUB7:%.*]] = sub nsw i64 [[MUL]], 1 // CHECK11-NEXT: store i64 [[SUB7]], i64* [[DOTCAPTURE_EXPR_3]], align 8 -// CHECK11-NEXT: [[TMP48:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8 -// CHECK11-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP48]], 1 +// CHECK11-NEXT: [[TMP42:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8 +// CHECK11-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP42]], 1 // CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[ADD]]) -// CHECK11-NEXT: [[TMP49:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l83.region_id, i32 5, i8** [[TMP41]], i8** [[TMP42]], i64* [[TMP43]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK11-NEXT: [[TMP50:%.*]] = icmp ne i32 [[TMP49]], 0 -// CHECK11-NEXT: br i1 [[TMP50]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK11-NEXT: [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l83.region_id, i32 5, i8** [[TMP36]], i8** [[TMP37]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK11-NEXT: [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0 +// CHECK11-NEXT: br i1 
[[TMP44]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK11: omp_offload.failed: // CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l83(i32 [[TMP5]], i32 [[TMP7]], i32 [[TMP0]], i32 [[TMP1]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -2349,10 +2316,10 @@ // CHECK11-NEXT: [[TMP51:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 // CHECK11-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10ELi2EEiT_(i32 noundef [[TMP51]]) // CHECK11-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK11-NEXT: [[TMP52:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK11-NEXT: call void @llvm.stackrestore(i8* [[TMP52]]) -// CHECK11-NEXT: [[TMP53:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK11-NEXT: ret i32 [[TMP53]] +// CHECK11-NEXT: [[TMP46:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK11-NEXT: call void @llvm.stackrestore(i8* [[TMP46]]) +// CHECK11-NEXT: [[TMP47:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK11-NEXT: ret i32 [[TMP47]] // // // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l83 @@ -2667,7 +2634,7 @@ // CHECK11-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK11-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 20) -// CHECK11-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l69.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK11-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l69.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK11-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK11-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK11: omp_offload.failed: @@ -2860,7 +2827,6 @@ // CHECK12-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 4 // CHECK12-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 4 // CHECK12-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 4 -// CHECK12-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 4 // CHECK12-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK12-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 // CHECK12-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 @@ -2894,74 +2860,64 @@ // CHECK12-NEXT: [[TMP13:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK12-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i32* // CHECK12-NEXT: store i32 [[TMP5]], i32* [[TMP14]], align 4 -// CHECK12-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK12-NEXT: store i64 4, i64* [[TMP15]], align 4 -// CHECK12-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* 
[[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK12-NEXT: store i8* null, i8** [[TMP16]], align 4 -// CHECK12-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK12-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32* -// CHECK12-NEXT: store i32 [[TMP7]], i32* [[TMP18]], align 4 -// CHECK12-NEXT: [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK12-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i32* -// CHECK12-NEXT: store i32 [[TMP7]], i32* [[TMP20]], align 4 -// CHECK12-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK12-NEXT: store i64 4, i64* [[TMP21]], align 4 -// CHECK12-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK12-NEXT: store i8* null, i8** [[TMP22]], align 4 -// CHECK12-NEXT: [[TMP23:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK12-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK12-NEXT: store i8* null, i8** [[TMP15]], align 4 +// CHECK12-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK12-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32* +// CHECK12-NEXT: store i32 [[TMP7]], i32* [[TMP17]], align 4 +// CHECK12-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK12-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32* +// CHECK12-NEXT: store i32 [[TMP7]], i32* [[TMP19]], align 4 +// CHECK12-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK12-NEXT: store i8* null, i8** [[TMP20]], align 4 +// CHECK12-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK12-NEXT: [[TMP22:%.*]] = bitcast i8** [[TMP21]] to i32* +// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP22]], align 4 +// CHECK12-NEXT: [[TMP23:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK12-NEXT: [[TMP24:%.*]] = bitcast i8** [[TMP23]] to i32* // CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP24]], align 4 -// CHECK12-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK12-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i32* -// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP26]], align 4 -// CHECK12-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK12-NEXT: store i64 4, i64* [[TMP27]], align 4 -// CHECK12-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK12-NEXT: store i8* null, i8** [[TMP28]], align 4 -// CHECK12-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK12-NEXT: [[TMP30:%.*]] = bitcast i8** [[TMP29]] to i32* -// CHECK12-NEXT: store i32 [[TMP1]], i32* [[TMP30]], align 4 -// CHECK12-NEXT: [[TMP31:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK12-NEXT: [[TMP32:%.*]] = bitcast i8** [[TMP31]] to i32* -// CHECK12-NEXT: store i32 [[TMP1]], i32* [[TMP32]], align 4 -// CHECK12-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK12-NEXT: 
store i64 4, i64* [[TMP33]], align 4 -// CHECK12-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 -// CHECK12-NEXT: store i8* null, i8** [[TMP34]], align 4 -// CHECK12-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK12-NEXT: [[TMP36:%.*]] = bitcast i8** [[TMP35]] to i32** -// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP36]], align 4 -// CHECK12-NEXT: [[TMP37:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK12-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i32** -// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP38]], align 4 -// CHECK12-NEXT: [[TMP39:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK12-NEXT: store i64 [[TMP10]], i64* [[TMP39]], align 4 -// CHECK12-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 -// CHECK12-NEXT: store i8* null, i8** [[TMP40]], align 4 -// CHECK12-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP43:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP44:%.*]] = load i32, i32* [[N]], align 4 -// CHECK12-NEXT: store i32 [[TMP44]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK12-NEXT: [[TMP45:%.*]] = load i32, i32* [[M]], align 4 -// CHECK12-NEXT: store i32 [[TMP45]], i32* [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK12-NEXT: [[TMP46:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK12-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP46]], 0 +// CHECK12-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK12-NEXT: store i8* null, i8** [[TMP25]], align 4 +// CHECK12-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK12-NEXT: [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i32* +// CHECK12-NEXT: store i32 [[TMP1]], i32* [[TMP27]], align 4 +// CHECK12-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK12-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i32* +// CHECK12-NEXT: store i32 [[TMP1]], i32* [[TMP29]], align 4 +// CHECK12-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 +// CHECK12-NEXT: store i8* null, i8** [[TMP30]], align 4 +// CHECK12-NEXT: [[TMP31:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK12-NEXT: [[TMP32:%.*]] = bitcast i8** [[TMP31]] to i32** +// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP32]], align 4 +// CHECK12-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK12-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i32** +// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP34]], align 4 +// CHECK12-NEXT: store i64 [[TMP10]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes, i32 0, i32 4), align 4 +// CHECK12-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 +// CHECK12-NEXT: store i8* null, i8** [[TMP35]], align 4 +// CHECK12-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// 
CHECK12-NEXT: [[TMP37:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP38:%.*]] = load i32, i32* [[N]], align 4 +// CHECK12-NEXT: store i32 [[TMP38]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK12-NEXT: [[TMP39:%.*]] = load i32, i32* [[M]], align 4 +// CHECK12-NEXT: store i32 [[TMP39]], i32* [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK12-NEXT: [[TMP40:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK12-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP40]], 0 // CHECK12-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK12-NEXT: [[CONV:%.*]] = sext i32 [[DIV]] to i64 -// CHECK12-NEXT: [[TMP47:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK12-NEXT: [[SUB4:%.*]] = sub nsw i32 [[TMP47]], 0 +// CHECK12-NEXT: [[TMP41:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK12-NEXT: [[SUB4:%.*]] = sub nsw i32 [[TMP41]], 0 // CHECK12-NEXT: [[DIV5:%.*]] = sdiv i32 [[SUB4]], 1 // CHECK12-NEXT: [[CONV6:%.*]] = sext i32 [[DIV5]] to i64 // CHECK12-NEXT: [[MUL:%.*]] = mul nsw i64 [[CONV]], [[CONV6]] // CHECK12-NEXT: [[SUB7:%.*]] = sub nsw i64 [[MUL]], 1 // CHECK12-NEXT: store i64 [[SUB7]], i64* [[DOTCAPTURE_EXPR_3]], align 8 -// CHECK12-NEXT: [[TMP48:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8 -// CHECK12-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP48]], 1 +// CHECK12-NEXT: [[TMP42:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8 +// CHECK12-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP42]], 1 // CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[ADD]]) -// CHECK12-NEXT: [[TMP49:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l83.region_id, i32 5, i8** [[TMP41]], i8** [[TMP42]], i64* [[TMP43]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK12-NEXT: [[TMP50:%.*]] = icmp ne i32 [[TMP49]], 0 -// CHECK12-NEXT: br i1 [[TMP50]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK12-NEXT: [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l83.region_id, i32 5, i8** [[TMP36]], i8** [[TMP37]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK12-NEXT: [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0 +// CHECK12-NEXT: br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK12: omp_offload.failed: // CHECK12-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l83(i32 [[TMP5]], i32 [[TMP7]], i32 [[TMP0]], i32 [[TMP1]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK12-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -2969,10 +2925,10 @@ // CHECK12-NEXT: [[TMP51:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 // CHECK12-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10ELi2EEiT_(i32 noundef [[TMP51]]) // CHECK12-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK12-NEXT: [[TMP52:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK12-NEXT: call void @llvm.stackrestore(i8* [[TMP52]]) -// CHECK12-NEXT: [[TMP53:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK12-NEXT: ret i32 [[TMP53]] +// CHECK12-NEXT: [[TMP46:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK12-NEXT: call void @llvm.stackrestore(i8* 
[[TMP46]]) +// CHECK12-NEXT: [[TMP47:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK12-NEXT: ret i32 [[TMP47]] // // // CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l83 @@ -3287,7 +3243,7 @@ // CHECK12-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK12-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 20) -// CHECK12-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l69.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK12-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l69.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK12-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK12-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK12: omp_offload.failed: diff --git a/clang/test/OpenMP/teams_distribute_parallel_for_dist_schedule_codegen.cpp b/clang/test/OpenMP/teams_distribute_parallel_for_dist_schedule_codegen.cpp --- a/clang/test/OpenMP/teams_distribute_parallel_for_dist_schedule_codegen.cpp +++ b/clang/test/OpenMP/teams_distribute_parallel_for_dist_schedule_codegen.cpp @@ -2403,7 +2403,6 @@ // CHECK9-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK9-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK9-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8 -// CHECK9-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 8 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -2411,19 +2410,17 @@ // CHECK9-NEXT: [[DOTOFFLOAD_BASEPTRS5:%.*]] = alloca [3 x i8*], align 8 // CHECK9-NEXT: [[DOTOFFLOAD_PTRS6:%.*]] = alloca [3 x i8*], align 8 // CHECK9-NEXT: [[DOTOFFLOAD_MAPPERS7:%.*]] = alloca [3 x i8*], align 8 -// CHECK9-NEXT: [[DOTOFFLOAD_SIZES8:%.*]] = alloca [3 x i64], align 8 -// CHECK9-NEXT: [[_TMP9:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[_TMP8:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTCAPTURE_EXPR_11:%.*]] = alloca i32, align 4 // CHECK9-NEXT: [[M_CASTED:%.*]] = alloca i64, align 8 -// CHECK9-NEXT: [[N_CASTED19:%.*]] = alloca i64, align 8 -// CHECK9-NEXT: [[DOTOFFLOAD_BASEPTRS21:%.*]] = alloca [4 x i8*], align 8 -// CHECK9-NEXT: [[DOTOFFLOAD_PTRS22:%.*]] = alloca [4 x i8*], align 8 -// CHECK9-NEXT: [[DOTOFFLOAD_MAPPERS23:%.*]] = alloca [4 x i8*], align 8 -// CHECK9-NEXT: [[DOTOFFLOAD_SIZES24:%.*]] = alloca [4 x i64], align 8 -// CHECK9-NEXT: [[_TMP25:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTCAPTURE_EXPR_26:%.*]] = alloca i32, 
align 4 -// CHECK9-NEXT: [[DOTCAPTURE_EXPR_27:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[N_CASTED18:%.*]] = alloca i64, align 8 +// CHECK9-NEXT: [[DOTOFFLOAD_BASEPTRS20:%.*]] = alloca [4 x i8*], align 8 +// CHECK9-NEXT: [[DOTOFFLOAD_PTRS21:%.*]] = alloca [4 x i8*], align 8 +// CHECK9-NEXT: [[DOTOFFLOAD_MAPPERS22:%.*]] = alloca [4 x i8*], align 8 +// CHECK9-NEXT: [[_TMP23:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTCAPTURE_EXPR_24:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTCAPTURE_EXPR_25:%.*]] = alloca i32, align 4 // CHECK9-NEXT: store i32 0, i32* [[RETVAL]], align 4 // CHECK9-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 // CHECK9-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 8 @@ -2446,184 +2443,164 @@ // CHECK9-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK9-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i64* // CHECK9-NEXT: store i64 [[TMP4]], i64* [[TMP9]], align 8 -// CHECK9-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK9-NEXT: store i64 4, i64* [[TMP10]], align 8 -// CHECK9-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK9-NEXT: store i8* null, i8** [[TMP11]], align 8 -// CHECK9-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK9-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64* -// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP13]], align 8 -// CHECK9-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK9-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64* -// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP15]], align 8 -// CHECK9-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK9-NEXT: store i64 8, i64* [[TMP16]], align 8 -// CHECK9-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK9-NEXT: store i8* null, i8** [[TMP17]], align 8 -// CHECK9-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK9-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK9-NEXT: store i8* null, i8** [[TMP10]], align 8 +// CHECK9-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK9-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i64* +// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP12]], align 8 +// CHECK9-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK9-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i64* +// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP14]], align 8 +// CHECK9-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK9-NEXT: store i8* null, i8** [[TMP15]], align 8 +// CHECK9-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK9-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** +// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 8 +// CHECK9-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK9-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** // CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 8 
-// CHECK9-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK9-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** -// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 8 -// CHECK9-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK9-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 8 -// CHECK9-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK9-NEXT: store i8* null, i8** [[TMP23]], align 8 -// CHECK9-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4 -// CHECK9-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK9-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 +// CHECK9-NEXT: store i64 [[TMP5]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 2), align 8 +// CHECK9-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK9-NEXT: store i8* null, i8** [[TMP20]], align 8 +// CHECK9-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP23:%.*]] = load i32, i32* [[N]], align 4 +// CHECK9-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK9-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK9-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK9-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK9-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK9-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1 -// CHECK9-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 -// CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP30]]) -// CHECK9-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l106.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK9-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 -// CHECK9-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK9-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], 1 +// CHECK9-NEXT: [[TMP26:%.*]] = zext i32 [[ADD]] to i64 +// CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP26]]) +// CHECK9-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l106.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x 
i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK9-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK9-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK9: omp_offload.failed: // CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l106(i64 [[TMP4]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK9: omp_offload.cont: -// CHECK9-NEXT: [[TMP33:%.*]] = load i32, i32* [[N]], align 4 +// CHECK9-NEXT: [[TMP29:%.*]] = load i32, i32* [[N]], align 4 // CHECK9-NEXT: [[CONV4:%.*]] = bitcast i64* [[N_CASTED3]] to i32* -// CHECK9-NEXT: store i32 [[TMP33]], i32* [[CONV4]], align 4 -// CHECK9-NEXT: [[TMP34:%.*]] = load i64, i64* [[N_CASTED3]], align 8 -// CHECK9-NEXT: [[TMP35:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK9-NEXT: [[TMP36:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i64* -// CHECK9-NEXT: store i64 [[TMP34]], i64* [[TMP37]], align 8 -// CHECK9-NEXT: [[TMP38:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i64* -// CHECK9-NEXT: store i64 [[TMP34]], i64* [[TMP39]], align 8 -// CHECK9-NEXT: [[TMP40:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 0 -// CHECK9-NEXT: store i64 4, i64* [[TMP40]], align 8 -// CHECK9-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 0 +// CHECK9-NEXT: store i32 [[TMP29]], i32* [[CONV4]], align 4 +// CHECK9-NEXT: [[TMP30:%.*]] = load i64, i64* [[N_CASTED3]], align 8 +// CHECK9-NEXT: [[TMP31:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK9-NEXT: [[TMP32:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i64* +// CHECK9-NEXT: store i64 [[TMP30]], i64* [[TMP33]], align 8 +// CHECK9-NEXT: [[TMP34:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i64* +// CHECK9-NEXT: store i64 [[TMP30]], i64* [[TMP35]], align 8 +// CHECK9-NEXT: [[TMP36:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 0 +// CHECK9-NEXT: store i8* null, i8** [[TMP36]], align 8 +// CHECK9-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1 +// CHECK9-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i64* +// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP38]], align 8 +// CHECK9-NEXT: [[TMP39:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 1 +// CHECK9-NEXT: [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i64* +// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP40]], align 8 +// CHECK9-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 1 // CHECK9-NEXT: store i8* null, i8** [[TMP41]], align 8 -// CHECK9-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1 -// CHECK9-NEXT: [[TMP43:%.*]] = bitcast i8** [[TMP42]] to i64* -// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP43]], align 8 -// CHECK9-NEXT: [[TMP44:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* 
[[DOTOFFLOAD_PTRS6]], i32 0, i32 1 -// CHECK9-NEXT: [[TMP45:%.*]] = bitcast i8** [[TMP44]] to i64* -// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP45]], align 8 -// CHECK9-NEXT: [[TMP46:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 1 -// CHECK9-NEXT: store i64 8, i64* [[TMP46]], align 8 -// CHECK9-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 1 -// CHECK9-NEXT: store i8* null, i8** [[TMP47]], align 8 -// CHECK9-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 2 -// CHECK9-NEXT: [[TMP49:%.*]] = bitcast i8** [[TMP48]] to i32** -// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP49]], align 8 -// CHECK9-NEXT: [[TMP50:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 2 -// CHECK9-NEXT: [[TMP51:%.*]] = bitcast i8** [[TMP50]] to i32** -// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP51]], align 8 -// CHECK9-NEXT: [[TMP52:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 2 -// CHECK9-NEXT: store i64 [[TMP35]], i64* [[TMP52]], align 8 -// CHECK9-NEXT: [[TMP53:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 2 -// CHECK9-NEXT: store i8* null, i8** [[TMP53]], align 8 -// CHECK9-NEXT: [[TMP54:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP55:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP56:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 2 +// CHECK9-NEXT: [[TMP43:%.*]] = bitcast i8** [[TMP42]] to i32** +// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP43]], align 8 +// CHECK9-NEXT: [[TMP44:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 2 +// CHECK9-NEXT: [[TMP45:%.*]] = bitcast i8** [[TMP44]] to i32** +// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP45]], align 8 +// CHECK9-NEXT: store i64 [[TMP31]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 2), align 8 +// CHECK9-NEXT: [[TMP46:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 2 +// CHECK9-NEXT: store i8* null, i8** [[TMP46]], align 8 +// CHECK9-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP49:%.*]] = load i32, i32* [[N]], align 4 +// CHECK9-NEXT: store i32 [[TMP49]], i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK9-NEXT: [[TMP50:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK9-NEXT: [[SUB11:%.*]] = sub nsw i32 [[TMP50]], 0 +// CHECK9-NEXT: [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1 +// CHECK9-NEXT: [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1 +// CHECK9-NEXT: store i32 [[SUB13]], i32* [[DOTCAPTURE_EXPR_10]], align 4 +// CHECK9-NEXT: [[TMP51:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 +// CHECK9-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP51]], 1 +// CHECK9-NEXT: [[TMP52:%.*]] = zext i32 [[ADD14]] to i64 +// CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP52]]) +// CHECK9-NEXT: [[TMP53:%.*]] = call i32 
@__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l111.region_id, i32 3, i8** [[TMP47]], i8** [[TMP48]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK9-NEXT: [[TMP54:%.*]] = icmp ne i32 [[TMP53]], 0 +// CHECK9-NEXT: br i1 [[TMP54]], label [[OMP_OFFLOAD_FAILED15:%.*]], label [[OMP_OFFLOAD_CONT16:%.*]] +// CHECK9: omp_offload.failed15: +// CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l111(i64 [[TMP30]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] +// CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT16]] +// CHECK9: omp_offload.cont16: +// CHECK9-NEXT: [[TMP55:%.*]] = load i32, i32* [[M]], align 4 +// CHECK9-NEXT: [[CONV17:%.*]] = bitcast i64* [[M_CASTED]] to i32* +// CHECK9-NEXT: store i32 [[TMP55]], i32* [[CONV17]], align 4 +// CHECK9-NEXT: [[TMP56:%.*]] = load i64, i64* [[M_CASTED]], align 8 // CHECK9-NEXT: [[TMP57:%.*]] = load i32, i32* [[N]], align 4 -// CHECK9-NEXT: store i32 [[TMP57]], i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK9-NEXT: [[TMP58:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK9-NEXT: [[SUB12:%.*]] = sub nsw i32 [[TMP58]], 0 -// CHECK9-NEXT: [[DIV13:%.*]] = sdiv i32 [[SUB12]], 1 -// CHECK9-NEXT: [[SUB14:%.*]] = sub nsw i32 [[DIV13]], 1 -// CHECK9-NEXT: store i32 [[SUB14]], i32* [[DOTCAPTURE_EXPR_11]], align 4 -// CHECK9-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_11]], align 4 -// CHECK9-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP59]], 1 -// CHECK9-NEXT: [[TMP60:%.*]] = zext i32 [[ADD15]] to i64 -// CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP60]]) -// CHECK9-NEXT: [[TMP61:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l111.region_id, i32 3, i8** [[TMP54]], i8** [[TMP55]], i64* [[TMP56]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK9-NEXT: [[TMP62:%.*]] = icmp ne i32 [[TMP61]], 0 -// CHECK9-NEXT: br i1 [[TMP62]], label [[OMP_OFFLOAD_FAILED16:%.*]], label [[OMP_OFFLOAD_CONT17:%.*]] -// CHECK9: omp_offload.failed16: -// CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l111(i64 [[TMP34]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] -// CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT17]] -// CHECK9: omp_offload.cont17: -// CHECK9-NEXT: [[TMP63:%.*]] = load i32, i32* [[M]], align 4 -// CHECK9-NEXT: [[CONV18:%.*]] = bitcast i64* [[M_CASTED]] to i32* -// CHECK9-NEXT: store i32 [[TMP63]], i32* [[CONV18]], align 4 -// CHECK9-NEXT: [[TMP64:%.*]] = load i64, i64* [[M_CASTED]], align 8 -// CHECK9-NEXT: [[TMP65:%.*]] = load i32, i32* [[N]], align 4 -// CHECK9-NEXT: [[CONV20:%.*]] = bitcast i64* [[N_CASTED19]] to i32* -// CHECK9-NEXT: store i32 [[TMP65]], i32* [[CONV20]], align 4 -// CHECK9-NEXT: [[TMP66:%.*]] = load i64, i64* [[N_CASTED19]], align 8 -// CHECK9-NEXT: [[TMP67:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK9-NEXT: [[TMP68:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP69:%.*]] = bitcast i8** [[TMP68]] to i64* -// CHECK9-NEXT: store i64 [[TMP64]], i64* [[TMP69]], align 8 -// CHECK9-NEXT: [[TMP70:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0 +// CHECK9-NEXT: 
[[CONV19:%.*]] = bitcast i64* [[N_CASTED18]] to i32* +// CHECK9-NEXT: store i32 [[TMP57]], i32* [[CONV19]], align 4 +// CHECK9-NEXT: [[TMP58:%.*]] = load i64, i64* [[N_CASTED18]], align 8 +// CHECK9-NEXT: [[TMP59:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK9-NEXT: [[TMP60:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP61:%.*]] = bitcast i8** [[TMP60]] to i64* +// CHECK9-NEXT: store i64 [[TMP56]], i64* [[TMP61]], align 8 +// CHECK9-NEXT: [[TMP62:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP63:%.*]] = bitcast i8** [[TMP62]] to i64* +// CHECK9-NEXT: store i64 [[TMP56]], i64* [[TMP63]], align 8 +// CHECK9-NEXT: [[TMP64:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 0 +// CHECK9-NEXT: store i8* null, i8** [[TMP64]], align 8 +// CHECK9-NEXT: [[TMP65:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 1 +// CHECK9-NEXT: [[TMP66:%.*]] = bitcast i8** [[TMP65]] to i64* +// CHECK9-NEXT: store i64 [[TMP58]], i64* [[TMP66]], align 8 +// CHECK9-NEXT: [[TMP67:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 1 +// CHECK9-NEXT: [[TMP68:%.*]] = bitcast i8** [[TMP67]] to i64* +// CHECK9-NEXT: store i64 [[TMP58]], i64* [[TMP68]], align 8 +// CHECK9-NEXT: [[TMP69:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 1 +// CHECK9-NEXT: store i8* null, i8** [[TMP69]], align 8 +// CHECK9-NEXT: [[TMP70:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 2 // CHECK9-NEXT: [[TMP71:%.*]] = bitcast i8** [[TMP70]] to i64* -// CHECK9-NEXT: store i64 [[TMP64]], i64* [[TMP71]], align 8 -// CHECK9-NEXT: [[TMP72:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES24]], i32 0, i32 0 -// CHECK9-NEXT: store i64 4, i64* [[TMP72]], align 8 -// CHECK9-NEXT: [[TMP73:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 0 -// CHECK9-NEXT: store i8* null, i8** [[TMP73]], align 8 -// CHECK9-NEXT: [[TMP74:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 1 -// CHECK9-NEXT: [[TMP75:%.*]] = bitcast i8** [[TMP74]] to i64* -// CHECK9-NEXT: store i64 [[TMP66]], i64* [[TMP75]], align 8 -// CHECK9-NEXT: [[TMP76:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 1 -// CHECK9-NEXT: [[TMP77:%.*]] = bitcast i8** [[TMP76]] to i64* -// CHECK9-NEXT: store i64 [[TMP66]], i64* [[TMP77]], align 8 -// CHECK9-NEXT: [[TMP78:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES24]], i32 0, i32 1 -// CHECK9-NEXT: store i64 4, i64* [[TMP78]], align 8 -// CHECK9-NEXT: [[TMP79:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 1 +// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP71]], align 8 +// CHECK9-NEXT: [[TMP72:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 2 +// CHECK9-NEXT: [[TMP73:%.*]] = bitcast i8** [[TMP72]] to i64* +// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP73]], align 8 +// CHECK9-NEXT: [[TMP74:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 2 +// CHECK9-NEXT: store i8* null, i8** [[TMP74]], align 8 +// CHECK9-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 3 +// CHECK9-NEXT: [[TMP76:%.*]] = bitcast i8** 
[[TMP75]] to i32** +// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP76]], align 8 +// CHECK9-NEXT: [[TMP77:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 3 +// CHECK9-NEXT: [[TMP78:%.*]] = bitcast i8** [[TMP77]] to i32** +// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP78]], align 8 +// CHECK9-NEXT: store i64 [[TMP59]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 3), align 8 +// CHECK9-NEXT: [[TMP79:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 3 // CHECK9-NEXT: store i8* null, i8** [[TMP79]], align 8 -// CHECK9-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 2 -// CHECK9-NEXT: [[TMP81:%.*]] = bitcast i8** [[TMP80]] to i64* -// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP81]], align 8 -// CHECK9-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 2 -// CHECK9-NEXT: [[TMP83:%.*]] = bitcast i8** [[TMP82]] to i64* -// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP83]], align 8 -// CHECK9-NEXT: [[TMP84:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES24]], i32 0, i32 2 -// CHECK9-NEXT: store i64 8, i64* [[TMP84]], align 8 -// CHECK9-NEXT: [[TMP85:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 2 -// CHECK9-NEXT: store i8* null, i8** [[TMP85]], align 8 -// CHECK9-NEXT: [[TMP86:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 3 -// CHECK9-NEXT: [[TMP87:%.*]] = bitcast i8** [[TMP86]] to i32** -// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP87]], align 8 -// CHECK9-NEXT: [[TMP88:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 3 -// CHECK9-NEXT: [[TMP89:%.*]] = bitcast i8** [[TMP88]] to i32** -// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP89]], align 8 -// CHECK9-NEXT: [[TMP90:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES24]], i32 0, i32 3 -// CHECK9-NEXT: store i64 [[TMP67]], i64* [[TMP90]], align 8 -// CHECK9-NEXT: [[TMP91:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 3 -// CHECK9-NEXT: store i8* null, i8** [[TMP91]], align 8 -// CHECK9-NEXT: [[TMP92:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP93:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP94:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES24]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP95:%.*]] = load i32, i32* [[N]], align 4 -// CHECK9-NEXT: store i32 [[TMP95]], i32* [[DOTCAPTURE_EXPR_26]], align 4 -// CHECK9-NEXT: [[TMP96:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_26]], align 4 -// CHECK9-NEXT: [[SUB28:%.*]] = sub nsw i32 [[TMP96]], 0 -// CHECK9-NEXT: [[DIV29:%.*]] = sdiv i32 [[SUB28]], 1 -// CHECK9-NEXT: [[SUB30:%.*]] = sub nsw i32 [[DIV29]], 1 -// CHECK9-NEXT: store i32 [[SUB30]], i32* [[DOTCAPTURE_EXPR_27]], align 4 -// CHECK9-NEXT: [[TMP97:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_27]], align 4 -// CHECK9-NEXT: [[ADD31:%.*]] = add nsw i32 [[TMP97]], 1 -// CHECK9-NEXT: [[TMP98:%.*]] = zext i32 [[ADD31]] to i64 -// CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP98]]) -// CHECK9-NEXT: [[TMP99:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* 
@.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l116.region_id, i32 4, i8** [[TMP92]], i8** [[TMP93]], i64* [[TMP94]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.7, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK9-NEXT: [[TMP100:%.*]] = icmp ne i32 [[TMP99]], 0 -// CHECK9-NEXT: br i1 [[TMP100]], label [[OMP_OFFLOAD_FAILED32:%.*]], label [[OMP_OFFLOAD_CONT33:%.*]] -// CHECK9: omp_offload.failed32: -// CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l116(i64 [[TMP64]], i64 [[TMP66]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] -// CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT33]] -// CHECK9: omp_offload.cont33: -// CHECK9-NEXT: [[TMP101:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 -// CHECK9-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP101]]) +// CHECK9-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP82:%.*]] = load i32, i32* [[N]], align 4 +// CHECK9-NEXT: store i32 [[TMP82]], i32* [[DOTCAPTURE_EXPR_24]], align 4 +// CHECK9-NEXT: [[TMP83:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_24]], align 4 +// CHECK9-NEXT: [[SUB26:%.*]] = sub nsw i32 [[TMP83]], 0 +// CHECK9-NEXT: [[DIV27:%.*]] = sdiv i32 [[SUB26]], 1 +// CHECK9-NEXT: [[SUB28:%.*]] = sub nsw i32 [[DIV27]], 1 +// CHECK9-NEXT: store i32 [[SUB28]], i32* [[DOTCAPTURE_EXPR_25]], align 4 +// CHECK9-NEXT: [[TMP84:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_25]], align 4 +// CHECK9-NEXT: [[ADD29:%.*]] = add nsw i32 [[TMP84]], 1 +// CHECK9-NEXT: [[TMP85:%.*]] = zext i32 [[ADD29]] to i64 +// CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP85]]) +// CHECK9-NEXT: [[TMP86:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l116.region_id, i32 4, i8** [[TMP80]], i8** [[TMP81]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK9-NEXT: [[TMP87:%.*]] = icmp ne i32 [[TMP86]], 0 +// CHECK9-NEXT: br i1 [[TMP87]], label [[OMP_OFFLOAD_FAILED30:%.*]], label [[OMP_OFFLOAD_CONT31:%.*]] +// CHECK9: omp_offload.failed30: +// CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l116(i64 [[TMP56]], i64 [[TMP58]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] +// CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT31]] +// CHECK9: omp_offload.cont31: +// CHECK9-NEXT: [[TMP88:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 +// CHECK9-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP88]]) // CHECK9-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK9-NEXT: [[TMP102:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK9-NEXT: call void @llvm.stackrestore(i8* [[TMP102]]) -// CHECK9-NEXT: [[TMP103:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK9-NEXT: ret i32 [[TMP103]] +// CHECK9-NEXT: [[TMP89:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK9-NEXT: call void @llvm.stackrestore(i8* [[TMP89]]) +// CHECK9-NEXT: [[TMP90:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK9-NEXT: ret i32 [[TMP90]] // // // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l106 @@ -3073,11 +3050,11 @@ // 
CHECK9-NEXT: [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK9-NEXT: store i32 [[TMP3]], i32* [[CONV2]], align 4 // CHECK9-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i64, i32*, i64)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i32* [[CONV1]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP4]]) +// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i64, i32*, i64)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32* [[CONV1]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP4]]) // CHECK9-NEXT: ret void // // -// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..5 +// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..6 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK9-NEXT: entry: // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -3159,7 +3136,7 @@ // CHECK9-NEXT: [[CONV7:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK9-NEXT: store i32 [[TMP21]], i32* [[CONV7]], align 4 // CHECK9-NEXT: [[TMP22:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64, i32*, i64)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], i64 [[TMP1]], i32* [[TMP2]], i64 [[TMP22]]) +// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64, i32*, i64)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], i64 [[TMP1]], i32* [[TMP2]], i64 [[TMP22]]) // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK9: omp.inner.for.inc: // CHECK9-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -3201,7 +3178,7 @@ // CHECK9-NEXT: ret void // // -// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..6 +// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..7 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK9-NEXT: entry: // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -3341,7 +3318,7 @@ // CHECK9-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK9-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK9-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l84.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK9-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l84.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK9-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK9-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK9: omp_offload.failed: @@ -3359,7 +3336,7 @@ // CHECK9-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0 // CHECK9-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0 // CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK9-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l89.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.13, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.14, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK9-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l89.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.16, i32 0, i32 
0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK9-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 // CHECK9-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]] // CHECK9: omp_offload.failed5: @@ -3389,7 +3366,7 @@ // CHECK9-NEXT: [[TMP30:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0 // CHECK9-NEXT: [[TMP31:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0 // CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK9-NEXT: [[TMP32:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l94.region_id, i32 2, i8** [[TMP30]], i8** [[TMP31]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.17, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.18, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK9-NEXT: [[TMP32:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l94.region_id, i32 2, i8** [[TMP30]], i8** [[TMP31]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.20, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.21, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK9-NEXT: [[TMP33:%.*]] = icmp ne i32 [[TMP32]], 0 // CHECK9-NEXT: br i1 [[TMP33]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]] // CHECK9: omp_offload.failed11: @@ -3405,11 +3382,11 @@ // CHECK9-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK9-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 // CHECK9-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 -// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..8 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK9-NEXT: ret void // // -// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..8 +// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..10 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK9-NEXT: entry: // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -3457,7 +3434,7 @@ // CHECK9-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 // CHECK9-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 // CHECK9-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]) +// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]) // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK9: omp.inner.for.inc: // CHECK9-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -3472,7 +3449,7 @@ // CHECK9-NEXT: ret void // // -// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..9 +// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..11 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK9-NEXT: entry: // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -3555,11 +3532,11 @@ // CHECK9-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK9-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 // CHECK9-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 -// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK9-NEXT: ret void // // -// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..14 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK9-NEXT: entry: // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -3607,7 +3584,7 @@ // CHECK9-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 // CHECK9-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 // CHECK9-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..12 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]) +// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]) // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK9: omp.inner.for.inc: // CHECK9-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -3622,7 +3599,7 @@ // CHECK9-NEXT: ret void // // -// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..12 +// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..15 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK9-NEXT: entry: // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -3716,11 +3693,11 @@ // CHECK9-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK9-NEXT: store i32 [[TMP2]], i32* [[CONV1]], align 4 // CHECK9-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..15 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP3]]) +// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..18 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP3]]) // CHECK9-NEXT: ret void // // -// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..15 +// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..18 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK9-NEXT: entry: // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -3776,7 +3753,7 @@ // CHECK9-NEXT: [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK9-NEXT: store i32 [[TMP12]], i32* [[CONV2]], align 4 // CHECK9-NEXT: [[TMP13:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]]) +// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..19 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]]) // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK9: omp.inner.for.inc: // CHECK9-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -3812,7 +3789,7 @@ // CHECK9-NEXT: ret void // // -// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..16 +// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..19 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK9-NEXT: entry: // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -3913,7 +3890,6 @@ // CHECK10-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK10-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK10-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8 -// CHECK10-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 8 // CHECK10-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK10-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK10-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -3921,19 +3897,17 @@ // CHECK10-NEXT: [[DOTOFFLOAD_BASEPTRS5:%.*]] = alloca [3 x i8*], align 8 // CHECK10-NEXT: [[DOTOFFLOAD_PTRS6:%.*]] = alloca [3 x i8*], align 8 // CHECK10-NEXT: [[DOTOFFLOAD_MAPPERS7:%.*]] = alloca [3 x i8*], align 8 -// CHECK10-NEXT: [[DOTOFFLOAD_SIZES8:%.*]] = alloca [3 x i64], align 8 -// CHECK10-NEXT: [[_TMP9:%.*]] = alloca i32, align 4 +// CHECK10-NEXT: [[_TMP8:%.*]] = alloca i32, align 4 +// CHECK10-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4 // CHECK10-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4 -// CHECK10-NEXT: [[DOTCAPTURE_EXPR_11:%.*]] = alloca i32, align 4 // CHECK10-NEXT: [[M_CASTED:%.*]] = alloca i64, align 8 -// CHECK10-NEXT: [[N_CASTED19:%.*]] = alloca i64, align 8 -// CHECK10-NEXT: [[DOTOFFLOAD_BASEPTRS21:%.*]] = alloca [4 x i8*], align 8 -// CHECK10-NEXT: [[DOTOFFLOAD_PTRS22:%.*]] = alloca [4 x i8*], align 8 -// CHECK10-NEXT: [[DOTOFFLOAD_MAPPERS23:%.*]] = alloca [4 x i8*], align 8 -// CHECK10-NEXT: [[DOTOFFLOAD_SIZES24:%.*]] = alloca [4 x i64], align 8 -// CHECK10-NEXT: [[_TMP25:%.*]] = alloca i32, align 4 -// CHECK10-NEXT: [[DOTCAPTURE_EXPR_26:%.*]] = alloca i32, align 4 -// CHECK10-NEXT: [[DOTCAPTURE_EXPR_27:%.*]] = alloca i32, align 4 +// CHECK10-NEXT: [[N_CASTED18:%.*]] = alloca i64, align 8 +// CHECK10-NEXT: [[DOTOFFLOAD_BASEPTRS20:%.*]] = alloca [4 x i8*], align 8 +// CHECK10-NEXT: [[DOTOFFLOAD_PTRS21:%.*]] = alloca [4 x i8*], align 8 +// CHECK10-NEXT: [[DOTOFFLOAD_MAPPERS22:%.*]] = alloca [4 x i8*], align 8 +// CHECK10-NEXT: [[_TMP23:%.*]] = alloca i32, align 4 +// CHECK10-NEXT: [[DOTCAPTURE_EXPR_24:%.*]] = alloca i32, align 4 +// CHECK10-NEXT: [[DOTCAPTURE_EXPR_25:%.*]] = alloca i32, align 4 // CHECK10-NEXT: store i32 0, i32* [[RETVAL]], align 4 // CHECK10-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 // CHECK10-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 8 @@ -3956,184 +3930,164 @@ // CHECK10-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK10-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i64* // CHECK10-NEXT: store i64 [[TMP4]], i64* 
[[TMP9]], align 8 -// CHECK10-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK10-NEXT: store i64 4, i64* [[TMP10]], align 8 -// CHECK10-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK10-NEXT: store i8* null, i8** [[TMP11]], align 8 -// CHECK10-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK10-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64* -// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP13]], align 8 -// CHECK10-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK10-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64* -// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP15]], align 8 -// CHECK10-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK10-NEXT: store i64 8, i64* [[TMP16]], align 8 -// CHECK10-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK10-NEXT: store i8* null, i8** [[TMP17]], align 8 -// CHECK10-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK10-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK10-NEXT: store i8* null, i8** [[TMP10]], align 8 +// CHECK10-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK10-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i64* +// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP12]], align 8 +// CHECK10-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK10-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i64* +// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP14]], align 8 +// CHECK10-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK10-NEXT: store i8* null, i8** [[TMP15]], align 8 +// CHECK10-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK10-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** +// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 8 +// CHECK10-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK10-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** // CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 8 -// CHECK10-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK10-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** -// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 8 -// CHECK10-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK10-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 8 -// CHECK10-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK10-NEXT: store i8* null, i8** [[TMP23]], align 8 -// CHECK10-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, 
i32 0 -// CHECK10-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4 -// CHECK10-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK10-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK10-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 +// CHECK10-NEXT: store i64 [[TMP5]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 2), align 8 +// CHECK10-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK10-NEXT: store i8* null, i8** [[TMP20]], align 8 +// CHECK10-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP23:%.*]] = load i32, i32* [[N]], align 4 +// CHECK10-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK10-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK10-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK10-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK10-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK10-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK10-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK10-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1 -// CHECK10-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 -// CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP30]]) -// CHECK10-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l106.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK10-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 -// CHECK10-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK10-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK10-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], 1 +// CHECK10-NEXT: [[TMP26:%.*]] = zext i32 [[ADD]] to i64 +// CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP26]]) +// CHECK10-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l106.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK10-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK10-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK10: omp_offload.failed: // CHECK10-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l106(i64 [[TMP4]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK10-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK10: omp_offload.cont: -// CHECK10-NEXT: [[TMP33:%.*]] = load i32, i32* [[N]], align 4 +// CHECK10-NEXT: [[TMP29:%.*]] = load i32, i32* [[N]], align 4 // CHECK10-NEXT: [[CONV4:%.*]] = bitcast i64* [[N_CASTED3]] to i32* -// CHECK10-NEXT: store i32 [[TMP33]], i32* [[CONV4]], align 4 -// CHECK10-NEXT: [[TMP34:%.*]] = load i64, i64* 
[[N_CASTED3]], align 8 -// CHECK10-NEXT: [[TMP35:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK10-NEXT: [[TMP36:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i64* -// CHECK10-NEXT: store i64 [[TMP34]], i64* [[TMP37]], align 8 -// CHECK10-NEXT: [[TMP38:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i64* -// CHECK10-NEXT: store i64 [[TMP34]], i64* [[TMP39]], align 8 -// CHECK10-NEXT: [[TMP40:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 0 -// CHECK10-NEXT: store i64 4, i64* [[TMP40]], align 8 -// CHECK10-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 0 +// CHECK10-NEXT: store i32 [[TMP29]], i32* [[CONV4]], align 4 +// CHECK10-NEXT: [[TMP30:%.*]] = load i64, i64* [[N_CASTED3]], align 8 +// CHECK10-NEXT: [[TMP31:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK10-NEXT: [[TMP32:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i64* +// CHECK10-NEXT: store i64 [[TMP30]], i64* [[TMP33]], align 8 +// CHECK10-NEXT: [[TMP34:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i64* +// CHECK10-NEXT: store i64 [[TMP30]], i64* [[TMP35]], align 8 +// CHECK10-NEXT: [[TMP36:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 0 +// CHECK10-NEXT: store i8* null, i8** [[TMP36]], align 8 +// CHECK10-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1 +// CHECK10-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i64* +// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP38]], align 8 +// CHECK10-NEXT: [[TMP39:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 1 +// CHECK10-NEXT: [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i64* +// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP40]], align 8 +// CHECK10-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 1 // CHECK10-NEXT: store i8* null, i8** [[TMP41]], align 8 -// CHECK10-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1 -// CHECK10-NEXT: [[TMP43:%.*]] = bitcast i8** [[TMP42]] to i64* -// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP43]], align 8 -// CHECK10-NEXT: [[TMP44:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 1 -// CHECK10-NEXT: [[TMP45:%.*]] = bitcast i8** [[TMP44]] to i64* -// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP45]], align 8 -// CHECK10-NEXT: [[TMP46:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 1 -// CHECK10-NEXT: store i64 8, i64* [[TMP46]], align 8 -// CHECK10-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 1 -// CHECK10-NEXT: store i8* null, i8** [[TMP47]], align 8 -// CHECK10-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 2 -// CHECK10-NEXT: [[TMP49:%.*]] = bitcast i8** [[TMP48]] to i32** -// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP49]], align 8 -// CHECK10-NEXT: [[TMP50:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 
0, i32 2 -// CHECK10-NEXT: [[TMP51:%.*]] = bitcast i8** [[TMP50]] to i32** -// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP51]], align 8 -// CHECK10-NEXT: [[TMP52:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 2 -// CHECK10-NEXT: store i64 [[TMP35]], i64* [[TMP52]], align 8 -// CHECK10-NEXT: [[TMP53:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 2 -// CHECK10-NEXT: store i8* null, i8** [[TMP53]], align 8 -// CHECK10-NEXT: [[TMP54:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP55:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP56:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 2 +// CHECK10-NEXT: [[TMP43:%.*]] = bitcast i8** [[TMP42]] to i32** +// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP43]], align 8 +// CHECK10-NEXT: [[TMP44:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 2 +// CHECK10-NEXT: [[TMP45:%.*]] = bitcast i8** [[TMP44]] to i32** +// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP45]], align 8 +// CHECK10-NEXT: store i64 [[TMP31]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 2), align 8 +// CHECK10-NEXT: [[TMP46:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 2 +// CHECK10-NEXT: store i8* null, i8** [[TMP46]], align 8 +// CHECK10-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP49:%.*]] = load i32, i32* [[N]], align 4 +// CHECK10-NEXT: store i32 [[TMP49]], i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK10-NEXT: [[TMP50:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK10-NEXT: [[SUB11:%.*]] = sub nsw i32 [[TMP50]], 0 +// CHECK10-NEXT: [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1 +// CHECK10-NEXT: [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1 +// CHECK10-NEXT: store i32 [[SUB13]], i32* [[DOTCAPTURE_EXPR_10]], align 4 +// CHECK10-NEXT: [[TMP51:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 +// CHECK10-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP51]], 1 +// CHECK10-NEXT: [[TMP52:%.*]] = zext i32 [[ADD14]] to i64 +// CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP52]]) +// CHECK10-NEXT: [[TMP53:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l111.region_id, i32 3, i8** [[TMP47]], i8** [[TMP48]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK10-NEXT: [[TMP54:%.*]] = icmp ne i32 [[TMP53]], 0 +// CHECK10-NEXT: br i1 [[TMP54]], label [[OMP_OFFLOAD_FAILED15:%.*]], label [[OMP_OFFLOAD_CONT16:%.*]] +// CHECK10: omp_offload.failed15: +// CHECK10-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l111(i64 [[TMP30]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] +// CHECK10-NEXT: br label [[OMP_OFFLOAD_CONT16]] +// CHECK10: omp_offload.cont16: +// CHECK10-NEXT: [[TMP55:%.*]] = load i32, i32* [[M]], align 4 
+// CHECK10-NEXT: [[CONV17:%.*]] = bitcast i64* [[M_CASTED]] to i32* +// CHECK10-NEXT: store i32 [[TMP55]], i32* [[CONV17]], align 4 +// CHECK10-NEXT: [[TMP56:%.*]] = load i64, i64* [[M_CASTED]], align 8 // CHECK10-NEXT: [[TMP57:%.*]] = load i32, i32* [[N]], align 4 -// CHECK10-NEXT: store i32 [[TMP57]], i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK10-NEXT: [[TMP58:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK10-NEXT: [[SUB12:%.*]] = sub nsw i32 [[TMP58]], 0 -// CHECK10-NEXT: [[DIV13:%.*]] = sdiv i32 [[SUB12]], 1 -// CHECK10-NEXT: [[SUB14:%.*]] = sub nsw i32 [[DIV13]], 1 -// CHECK10-NEXT: store i32 [[SUB14]], i32* [[DOTCAPTURE_EXPR_11]], align 4 -// CHECK10-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_11]], align 4 -// CHECK10-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP59]], 1 -// CHECK10-NEXT: [[TMP60:%.*]] = zext i32 [[ADD15]] to i64 -// CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP60]]) -// CHECK10-NEXT: [[TMP61:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l111.region_id, i32 3, i8** [[TMP54]], i8** [[TMP55]], i64* [[TMP56]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK10-NEXT: [[TMP62:%.*]] = icmp ne i32 [[TMP61]], 0 -// CHECK10-NEXT: br i1 [[TMP62]], label [[OMP_OFFLOAD_FAILED16:%.*]], label [[OMP_OFFLOAD_CONT17:%.*]] -// CHECK10: omp_offload.failed16: -// CHECK10-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l111(i64 [[TMP34]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] -// CHECK10-NEXT: br label [[OMP_OFFLOAD_CONT17]] -// CHECK10: omp_offload.cont17: -// CHECK10-NEXT: [[TMP63:%.*]] = load i32, i32* [[M]], align 4 -// CHECK10-NEXT: [[CONV18:%.*]] = bitcast i64* [[M_CASTED]] to i32* -// CHECK10-NEXT: store i32 [[TMP63]], i32* [[CONV18]], align 4 -// CHECK10-NEXT: [[TMP64:%.*]] = load i64, i64* [[M_CASTED]], align 8 -// CHECK10-NEXT: [[TMP65:%.*]] = load i32, i32* [[N]], align 4 -// CHECK10-NEXT: [[CONV20:%.*]] = bitcast i64* [[N_CASTED19]] to i32* -// CHECK10-NEXT: store i32 [[TMP65]], i32* [[CONV20]], align 4 -// CHECK10-NEXT: [[TMP66:%.*]] = load i64, i64* [[N_CASTED19]], align 8 -// CHECK10-NEXT: [[TMP67:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK10-NEXT: [[TMP68:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP69:%.*]] = bitcast i8** [[TMP68]] to i64* -// CHECK10-NEXT: store i64 [[TMP64]], i64* [[TMP69]], align 8 -// CHECK10-NEXT: [[TMP70:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0 +// CHECK10-NEXT: [[CONV19:%.*]] = bitcast i64* [[N_CASTED18]] to i32* +// CHECK10-NEXT: store i32 [[TMP57]], i32* [[CONV19]], align 4 +// CHECK10-NEXT: [[TMP58:%.*]] = load i64, i64* [[N_CASTED18]], align 8 +// CHECK10-NEXT: [[TMP59:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK10-NEXT: [[TMP60:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP61:%.*]] = bitcast i8** [[TMP60]] to i64* +// CHECK10-NEXT: store i64 [[TMP56]], i64* [[TMP61]], align 8 +// CHECK10-NEXT: [[TMP62:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP63:%.*]] = bitcast i8** [[TMP62]] to i64* +// CHECK10-NEXT: store i64 [[TMP56]], i64* [[TMP63]], align 8 +// CHECK10-NEXT: [[TMP64:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* 
[[DOTOFFLOAD_MAPPERS22]], i64 0, i64 0 +// CHECK10-NEXT: store i8* null, i8** [[TMP64]], align 8 +// CHECK10-NEXT: [[TMP65:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 1 +// CHECK10-NEXT: [[TMP66:%.*]] = bitcast i8** [[TMP65]] to i64* +// CHECK10-NEXT: store i64 [[TMP58]], i64* [[TMP66]], align 8 +// CHECK10-NEXT: [[TMP67:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 1 +// CHECK10-NEXT: [[TMP68:%.*]] = bitcast i8** [[TMP67]] to i64* +// CHECK10-NEXT: store i64 [[TMP58]], i64* [[TMP68]], align 8 +// CHECK10-NEXT: [[TMP69:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 1 +// CHECK10-NEXT: store i8* null, i8** [[TMP69]], align 8 +// CHECK10-NEXT: [[TMP70:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 2 // CHECK10-NEXT: [[TMP71:%.*]] = bitcast i8** [[TMP70]] to i64* -// CHECK10-NEXT: store i64 [[TMP64]], i64* [[TMP71]], align 8 -// CHECK10-NEXT: [[TMP72:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES24]], i32 0, i32 0 -// CHECK10-NEXT: store i64 4, i64* [[TMP72]], align 8 -// CHECK10-NEXT: [[TMP73:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 0 -// CHECK10-NEXT: store i8* null, i8** [[TMP73]], align 8 -// CHECK10-NEXT: [[TMP74:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 1 -// CHECK10-NEXT: [[TMP75:%.*]] = bitcast i8** [[TMP74]] to i64* -// CHECK10-NEXT: store i64 [[TMP66]], i64* [[TMP75]], align 8 -// CHECK10-NEXT: [[TMP76:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 1 -// CHECK10-NEXT: [[TMP77:%.*]] = bitcast i8** [[TMP76]] to i64* -// CHECK10-NEXT: store i64 [[TMP66]], i64* [[TMP77]], align 8 -// CHECK10-NEXT: [[TMP78:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES24]], i32 0, i32 1 -// CHECK10-NEXT: store i64 4, i64* [[TMP78]], align 8 -// CHECK10-NEXT: [[TMP79:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 1 +// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP71]], align 8 +// CHECK10-NEXT: [[TMP72:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 2 +// CHECK10-NEXT: [[TMP73:%.*]] = bitcast i8** [[TMP72]] to i64* +// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP73]], align 8 +// CHECK10-NEXT: [[TMP74:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 2 +// CHECK10-NEXT: store i8* null, i8** [[TMP74]], align 8 +// CHECK10-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 3 +// CHECK10-NEXT: [[TMP76:%.*]] = bitcast i8** [[TMP75]] to i32** +// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP76]], align 8 +// CHECK10-NEXT: [[TMP77:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 3 +// CHECK10-NEXT: [[TMP78:%.*]] = bitcast i8** [[TMP77]] to i32** +// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP78]], align 8 +// CHECK10-NEXT: store i64 [[TMP59]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 3), align 8 +// CHECK10-NEXT: [[TMP79:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 3 // CHECK10-NEXT: store i8* null, i8** [[TMP79]], align 8 -// CHECK10-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 2 -// CHECK10-NEXT: 
[[TMP81:%.*]] = bitcast i8** [[TMP80]] to i64* -// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP81]], align 8 -// CHECK10-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 2 -// CHECK10-NEXT: [[TMP83:%.*]] = bitcast i8** [[TMP82]] to i64* -// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP83]], align 8 -// CHECK10-NEXT: [[TMP84:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES24]], i32 0, i32 2 -// CHECK10-NEXT: store i64 8, i64* [[TMP84]], align 8 -// CHECK10-NEXT: [[TMP85:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 2 -// CHECK10-NEXT: store i8* null, i8** [[TMP85]], align 8 -// CHECK10-NEXT: [[TMP86:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 3 -// CHECK10-NEXT: [[TMP87:%.*]] = bitcast i8** [[TMP86]] to i32** -// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP87]], align 8 -// CHECK10-NEXT: [[TMP88:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 3 -// CHECK10-NEXT: [[TMP89:%.*]] = bitcast i8** [[TMP88]] to i32** -// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP89]], align 8 -// CHECK10-NEXT: [[TMP90:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES24]], i32 0, i32 3 -// CHECK10-NEXT: store i64 [[TMP67]], i64* [[TMP90]], align 8 -// CHECK10-NEXT: [[TMP91:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 3 -// CHECK10-NEXT: store i8* null, i8** [[TMP91]], align 8 -// CHECK10-NEXT: [[TMP92:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP93:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP94:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES24]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP95:%.*]] = load i32, i32* [[N]], align 4 -// CHECK10-NEXT: store i32 [[TMP95]], i32* [[DOTCAPTURE_EXPR_26]], align 4 -// CHECK10-NEXT: [[TMP96:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_26]], align 4 -// CHECK10-NEXT: [[SUB28:%.*]] = sub nsw i32 [[TMP96]], 0 -// CHECK10-NEXT: [[DIV29:%.*]] = sdiv i32 [[SUB28]], 1 -// CHECK10-NEXT: [[SUB30:%.*]] = sub nsw i32 [[DIV29]], 1 -// CHECK10-NEXT: store i32 [[SUB30]], i32* [[DOTCAPTURE_EXPR_27]], align 4 -// CHECK10-NEXT: [[TMP97:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_27]], align 4 -// CHECK10-NEXT: [[ADD31:%.*]] = add nsw i32 [[TMP97]], 1 -// CHECK10-NEXT: [[TMP98:%.*]] = zext i32 [[ADD31]] to i64 -// CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP98]]) -// CHECK10-NEXT: [[TMP99:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l116.region_id, i32 4, i8** [[TMP92]], i8** [[TMP93]], i64* [[TMP94]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.7, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK10-NEXT: [[TMP100:%.*]] = icmp ne i32 [[TMP99]], 0 -// CHECK10-NEXT: br i1 [[TMP100]], label [[OMP_OFFLOAD_FAILED32:%.*]], label [[OMP_OFFLOAD_CONT33:%.*]] -// CHECK10: omp_offload.failed32: -// CHECK10-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l116(i64 [[TMP64]], i64 [[TMP66]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] -// CHECK10-NEXT: br label [[OMP_OFFLOAD_CONT33]] -// CHECK10: omp_offload.cont33: -// CHECK10-NEXT: [[TMP101:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 -// 
CHECK10-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP101]]) +// CHECK10-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP82:%.*]] = load i32, i32* [[N]], align 4 +// CHECK10-NEXT: store i32 [[TMP82]], i32* [[DOTCAPTURE_EXPR_24]], align 4 +// CHECK10-NEXT: [[TMP83:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_24]], align 4 +// CHECK10-NEXT: [[SUB26:%.*]] = sub nsw i32 [[TMP83]], 0 +// CHECK10-NEXT: [[DIV27:%.*]] = sdiv i32 [[SUB26]], 1 +// CHECK10-NEXT: [[SUB28:%.*]] = sub nsw i32 [[DIV27]], 1 +// CHECK10-NEXT: store i32 [[SUB28]], i32* [[DOTCAPTURE_EXPR_25]], align 4 +// CHECK10-NEXT: [[TMP84:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_25]], align 4 +// CHECK10-NEXT: [[ADD29:%.*]] = add nsw i32 [[TMP84]], 1 +// CHECK10-NEXT: [[TMP85:%.*]] = zext i32 [[ADD29]] to i64 +// CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP85]]) +// CHECK10-NEXT: [[TMP86:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l116.region_id, i32 4, i8** [[TMP80]], i8** [[TMP81]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK10-NEXT: [[TMP87:%.*]] = icmp ne i32 [[TMP86]], 0 +// CHECK10-NEXT: br i1 [[TMP87]], label [[OMP_OFFLOAD_FAILED30:%.*]], label [[OMP_OFFLOAD_CONT31:%.*]] +// CHECK10: omp_offload.failed30: +// CHECK10-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l116(i64 [[TMP56]], i64 [[TMP58]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] +// CHECK10-NEXT: br label [[OMP_OFFLOAD_CONT31]] +// CHECK10: omp_offload.cont31: +// CHECK10-NEXT: [[TMP88:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 +// CHECK10-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP88]]) // CHECK10-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK10-NEXT: [[TMP102:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK10-NEXT: call void @llvm.stackrestore(i8* [[TMP102]]) -// CHECK10-NEXT: [[TMP103:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK10-NEXT: ret i32 [[TMP103]] +// CHECK10-NEXT: [[TMP89:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK10-NEXT: call void @llvm.stackrestore(i8* [[TMP89]]) +// CHECK10-NEXT: [[TMP90:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK10-NEXT: ret i32 [[TMP90]] // // // CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l106 @@ -4583,11 +4537,11 @@ // CHECK10-NEXT: [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK10-NEXT: store i32 [[TMP3]], i32* [[CONV2]], align 4 // CHECK10-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i64, i32*, i64)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i32* [[CONV1]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP4]]) +// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i64, i32*, i64)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32* [[CONV1]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP4]]) // CHECK10-NEXT: ret void // // -// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..5 +// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..6 // CHECK10-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK10-NEXT: entry: // CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -4669,7 +4623,7 @@ // CHECK10-NEXT: [[CONV7:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK10-NEXT: store i32 [[TMP21]], i32* [[CONV7]], align 4 // CHECK10-NEXT: [[TMP22:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64, i32*, i64)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], i64 [[TMP1]], i32* [[TMP2]], i64 [[TMP22]]) +// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64, i32*, i64)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], i64 [[TMP1]], i32* [[TMP2]], i64 [[TMP22]]) // CHECK10-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK10: omp.inner.for.inc: // CHECK10-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -4711,7 +4665,7 @@ // CHECK10-NEXT: ret void // // -// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..6 +// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..7 // CHECK10-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK10-NEXT: entry: // CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -4851,7 +4805,7 @@ // CHECK10-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK10-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK10-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l84.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK10-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l84.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds 
([1 x i64], [1 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK10-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK10-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK10: omp_offload.failed: @@ -4869,7 +4823,7 @@ // CHECK10-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0 // CHECK10-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0 // CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK10-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l89.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.13, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.14, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK10-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l89.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.16, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK10-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 // CHECK10-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]] // CHECK10: omp_offload.failed5: @@ -4899,7 +4853,7 @@ // CHECK10-NEXT: [[TMP30:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0 // CHECK10-NEXT: [[TMP31:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0 // CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK10-NEXT: [[TMP32:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l94.region_id, i32 2, i8** [[TMP30]], i8** [[TMP31]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.17, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.18, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK10-NEXT: [[TMP32:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l94.region_id, i32 2, i8** [[TMP30]], i8** [[TMP31]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.20, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.21, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK10-NEXT: [[TMP33:%.*]] = icmp ne i32 [[TMP32]], 0 // CHECK10-NEXT: br i1 [[TMP33]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]] // CHECK10: omp_offload.failed11: @@ -4915,11 +4869,11 @@ // CHECK10-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK10-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 // CHECK10-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 -// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, 
...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..8 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK10-NEXT: ret void // // -// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..8 +// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..10 // CHECK10-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK10-NEXT: entry: // CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -4967,7 +4921,7 @@ // CHECK10-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 // CHECK10-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 // CHECK10-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]) +// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]) // CHECK10-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK10: omp.inner.for.inc: // CHECK10-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -4982,7 +4936,7 @@ // CHECK10-NEXT: ret void // // -// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..9 +// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..11 // CHECK10-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK10-NEXT: entry: // CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -5065,11 +5019,11 @@ // CHECK10-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK10-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 // CHECK10-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 -// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK10-NEXT: ret void // // -// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..14 // CHECK10-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK10-NEXT: entry: // CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -5117,7 +5071,7 @@ // CHECK10-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 // CHECK10-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 // CHECK10-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..12 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]) +// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]) // CHECK10-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK10: omp.inner.for.inc: // CHECK10-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -5132,7 +5086,7 @@ // CHECK10-NEXT: ret void // // -// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..12 +// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..15 // CHECK10-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK10-NEXT: entry: // CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -5226,11 +5180,11 @@ // CHECK10-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK10-NEXT: store i32 [[TMP2]], i32* [[CONV1]], align 4 // CHECK10-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..15 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP3]]) +// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..18 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP3]]) // CHECK10-NEXT: ret void // // -// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..15 +// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..18 // CHECK10-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK10-NEXT: entry: // CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -5286,7 +5240,7 @@ // CHECK10-NEXT: [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK10-NEXT: store i32 [[TMP12]], i32* [[CONV2]], align 4 // CHECK10-NEXT: [[TMP13:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]]) +// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..19 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]]) // CHECK10-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK10: omp.inner.for.inc: // CHECK10-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -5322,7 +5276,7 @@ // CHECK10-NEXT: ret void // // -// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..16 +// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..19 // CHECK10-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK10-NEXT: entry: // CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -5423,7 +5377,6 @@ // CHECK11-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK11-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK11-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4 -// CHECK11-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 4 // CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -5431,19 +5384,17 @@ // CHECK11-NEXT: [[DOTOFFLOAD_BASEPTRS4:%.*]] = alloca [3 x i8*], align 4 // CHECK11-NEXT: [[DOTOFFLOAD_PTRS5:%.*]] = alloca [3 x i8*], align 4 // CHECK11-NEXT: [[DOTOFFLOAD_MAPPERS6:%.*]] = alloca [3 x i8*], align 4 -// CHECK11-NEXT: [[DOTOFFLOAD_SIZES7:%.*]] = alloca [3 x i64], align 4 -// CHECK11-NEXT: [[_TMP8:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[_TMP7:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTCAPTURE_EXPR_8:%.*]] = alloca i32, align 4 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4 // CHECK11-NEXT: [[M_CASTED:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[N_CASTED17:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOFFLOAD_BASEPTRS18:%.*]] = 
alloca [4 x i8*], align 4 -// CHECK11-NEXT: [[DOTOFFLOAD_PTRS19:%.*]] = alloca [4 x i8*], align 4 -// CHECK11-NEXT: [[DOTOFFLOAD_MAPPERS20:%.*]] = alloca [4 x i8*], align 4 -// CHECK11-NEXT: [[DOTOFFLOAD_SIZES21:%.*]] = alloca [4 x i64], align 4 -// CHECK11-NEXT: [[_TMP22:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTCAPTURE_EXPR_23:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTCAPTURE_EXPR_24:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[N_CASTED16:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOFFLOAD_BASEPTRS17:%.*]] = alloca [4 x i8*], align 4 +// CHECK11-NEXT: [[DOTOFFLOAD_PTRS18:%.*]] = alloca [4 x i8*], align 4 +// CHECK11-NEXT: [[DOTOFFLOAD_MAPPERS19:%.*]] = alloca [4 x i8*], align 4 +// CHECK11-NEXT: [[_TMP20:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTCAPTURE_EXPR_21:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTCAPTURE_EXPR_22:%.*]] = alloca i32, align 4 // CHECK11-NEXT: store i32 0, i32* [[RETVAL]], align 4 // CHECK11-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 // CHECK11-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 4 @@ -5465,183 +5416,163 @@ // CHECK11-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK11-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i32* // CHECK11-NEXT: store i32 [[TMP3]], i32* [[TMP9]], align 4 -// CHECK11-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK11-NEXT: store i64 4, i64* [[TMP10]], align 4 -// CHECK11-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK11-NEXT: store i8* null, i8** [[TMP11]], align 4 -// CHECK11-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK11-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32* -// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP13]], align 4 -// CHECK11-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK11-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32* -// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP15]], align 4 -// CHECK11-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK11-NEXT: store i64 4, i64* [[TMP16]], align 4 -// CHECK11-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK11-NEXT: store i8* null, i8** [[TMP17]], align 4 -// CHECK11-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK11-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK11-NEXT: store i8* null, i8** [[TMP10]], align 4 +// CHECK11-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK11-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i32* +// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP12]], align 4 +// CHECK11-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK11-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i32* +// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP14]], align 4 +// CHECK11-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK11-NEXT: store i8* null, i8** [[TMP15]], align 4 +// CHECK11-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* 
[[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK11-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** +// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 4 +// CHECK11-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK11-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** // CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 4 -// CHECK11-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK11-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** -// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 4 -// CHECK11-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK11-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 4 -// CHECK11-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK11-NEXT: store i8* null, i8** [[TMP23]], align 4 -// CHECK11-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4 -// CHECK11-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK11-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 +// CHECK11-NEXT: store i64 [[TMP5]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 2), align 4 +// CHECK11-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK11-NEXT: store i8* null, i8** [[TMP20]], align 4 +// CHECK11-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP23:%.*]] = load i32, i32* [[N]], align 4 +// CHECK11-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK11-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK11-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK11-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK11-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK11-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1 -// CHECK11-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 -// CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP30]]) -// CHECK11-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l106.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK11-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 -// CHECK11-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK11-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK11-NEXT: [[ADD:%.*]] = add nsw 
i32 [[TMP25]], 1 +// CHECK11-NEXT: [[TMP26:%.*]] = zext i32 [[ADD]] to i64 +// CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP26]]) +// CHECK11-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l106.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK11-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK11-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK11: omp_offload.failed: // CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l106(i32 [[TMP3]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK11: omp_offload.cont: -// CHECK11-NEXT: [[TMP33:%.*]] = load i32, i32* [[N]], align 4 -// CHECK11-NEXT: store i32 [[TMP33]], i32* [[N_CASTED3]], align 4 -// CHECK11-NEXT: [[TMP34:%.*]] = load i32, i32* [[N_CASTED3]], align 4 -// CHECK11-NEXT: [[TMP35:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK11-NEXT: [[TMP36:%.*]] = sext i32 [[TMP35]] to i64 -// CHECK11-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i32* -// CHECK11-NEXT: store i32 [[TMP34]], i32* [[TMP38]], align 4 -// CHECK11-NEXT: [[TMP39:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i32* -// CHECK11-NEXT: store i32 [[TMP34]], i32* [[TMP40]], align 4 -// CHECK11-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 0 -// CHECK11-NEXT: store i64 4, i64* [[TMP41]], align 4 -// CHECK11-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP29:%.*]] = load i32, i32* [[N]], align 4 +// CHECK11-NEXT: store i32 [[TMP29]], i32* [[N_CASTED3]], align 4 +// CHECK11-NEXT: [[TMP30:%.*]] = load i32, i32* [[N_CASTED3]], align 4 +// CHECK11-NEXT: [[TMP31:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK11-NEXT: [[TMP32:%.*]] = sext i32 [[TMP31]] to i64 +// CHECK11-NEXT: [[TMP33:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i32* +// CHECK11-NEXT: store i32 [[TMP30]], i32* [[TMP34]], align 4 +// CHECK11-NEXT: [[TMP35:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP36:%.*]] = bitcast i8** [[TMP35]] to i32* +// CHECK11-NEXT: store i32 [[TMP30]], i32* [[TMP36]], align 4 +// CHECK11-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0 +// CHECK11-NEXT: store i8* null, i8** [[TMP37]], align 4 +// CHECK11-NEXT: [[TMP38:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1 +// CHECK11-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i32* +// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP39]], align 4 +// CHECK11-NEXT: [[TMP40:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 1 +// CHECK11-NEXT: [[TMP41:%.*]] = bitcast i8** [[TMP40]] to i32* +// CHECK11-NEXT: store 
i32 [[TMP0]], i32* [[TMP41]], align 4 +// CHECK11-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1 // CHECK11-NEXT: store i8* null, i8** [[TMP42]], align 4 -// CHECK11-NEXT: [[TMP43:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1 -// CHECK11-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32* -// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP44]], align 4 -// CHECK11-NEXT: [[TMP45:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 1 -// CHECK11-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32* -// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP46]], align 4 -// CHECK11-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 1 -// CHECK11-NEXT: store i64 4, i64* [[TMP47]], align 4 -// CHECK11-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1 -// CHECK11-NEXT: store i8* null, i8** [[TMP48]], align 4 -// CHECK11-NEXT: [[TMP49:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2 -// CHECK11-NEXT: [[TMP50:%.*]] = bitcast i8** [[TMP49]] to i32** -// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP50]], align 4 -// CHECK11-NEXT: [[TMP51:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 2 -// CHECK11-NEXT: [[TMP52:%.*]] = bitcast i8** [[TMP51]] to i32** -// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP52]], align 4 -// CHECK11-NEXT: [[TMP53:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 2 -// CHECK11-NEXT: store i64 [[TMP36]], i64* [[TMP53]], align 4 -// CHECK11-NEXT: [[TMP54:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 2 -// CHECK11-NEXT: store i8* null, i8** [[TMP54]], align 4 -// CHECK11-NEXT: [[TMP55:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP56:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP57:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP43:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2 +// CHECK11-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32** +// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP44]], align 4 +// CHECK11-NEXT: [[TMP45:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 2 +// CHECK11-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32** +// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP46]], align 4 +// CHECK11-NEXT: store i64 [[TMP32]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 2), align 4 +// CHECK11-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 2 +// CHECK11-NEXT: store i8* null, i8** [[TMP47]], align 4 +// CHECK11-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP49:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP50:%.*]] = load i32, i32* [[N]], align 4 +// CHECK11-NEXT: store i32 [[TMP50]], i32* [[DOTCAPTURE_EXPR_8]], align 4 +// CHECK11-NEXT: [[TMP51:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_8]], align 4 +// CHECK11-NEXT: [[SUB10:%.*]] = sub nsw i32 [[TMP51]], 0 +// 
CHECK11-NEXT: [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1 +// CHECK11-NEXT: [[SUB12:%.*]] = sub nsw i32 [[DIV11]], 1 +// CHECK11-NEXT: store i32 [[SUB12]], i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK11-NEXT: [[TMP52:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK11-NEXT: [[ADD13:%.*]] = add nsw i32 [[TMP52]], 1 +// CHECK11-NEXT: [[TMP53:%.*]] = zext i32 [[ADD13]] to i64 +// CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP53]]) +// CHECK11-NEXT: [[TMP54:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l111.region_id, i32 3, i8** [[TMP48]], i8** [[TMP49]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK11-NEXT: [[TMP55:%.*]] = icmp ne i32 [[TMP54]], 0 +// CHECK11-NEXT: br i1 [[TMP55]], label [[OMP_OFFLOAD_FAILED14:%.*]], label [[OMP_OFFLOAD_CONT15:%.*]] +// CHECK11: omp_offload.failed14: +// CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l111(i32 [[TMP30]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] +// CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT15]] +// CHECK11: omp_offload.cont15: +// CHECK11-NEXT: [[TMP56:%.*]] = load i32, i32* [[M]], align 4 +// CHECK11-NEXT: store i32 [[TMP56]], i32* [[M_CASTED]], align 4 +// CHECK11-NEXT: [[TMP57:%.*]] = load i32, i32* [[M_CASTED]], align 4 // CHECK11-NEXT: [[TMP58:%.*]] = load i32, i32* [[N]], align 4 -// CHECK11-NEXT: store i32 [[TMP58]], i32* [[DOTCAPTURE_EXPR_9]], align 4 -// CHECK11-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 -// CHECK11-NEXT: [[SUB11:%.*]] = sub nsw i32 [[TMP59]], 0 -// CHECK11-NEXT: [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1 -// CHECK11-NEXT: [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1 -// CHECK11-NEXT: store i32 [[SUB13]], i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK11-NEXT: [[TMP60:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK11-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP60]], 1 -// CHECK11-NEXT: [[TMP61:%.*]] = zext i32 [[ADD14]] to i64 -// CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP61]]) -// CHECK11-NEXT: [[TMP62:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l111.region_id, i32 3, i8** [[TMP55]], i8** [[TMP56]], i64* [[TMP57]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK11-NEXT: [[TMP63:%.*]] = icmp ne i32 [[TMP62]], 0 -// CHECK11-NEXT: br i1 [[TMP63]], label [[OMP_OFFLOAD_FAILED15:%.*]], label [[OMP_OFFLOAD_CONT16:%.*]] -// CHECK11: omp_offload.failed15: -// CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l111(i32 [[TMP34]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] -// CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT16]] -// CHECK11: omp_offload.cont16: -// CHECK11-NEXT: [[TMP64:%.*]] = load i32, i32* [[M]], align 4 -// CHECK11-NEXT: store i32 [[TMP64]], i32* [[M_CASTED]], align 4 -// CHECK11-NEXT: [[TMP65:%.*]] = load i32, i32* [[M_CASTED]], align 4 -// CHECK11-NEXT: [[TMP66:%.*]] = load i32, i32* [[N]], align 4 -// CHECK11-NEXT: store i32 [[TMP66]], i32* [[N_CASTED17]], align 4 -// CHECK11-NEXT: [[TMP67:%.*]] = load i32, i32* [[N_CASTED17]], align 4 -// CHECK11-NEXT: [[TMP68:%.*]] = mul nuw i32 
[[TMP0]], 4 -// CHECK11-NEXT: [[TMP69:%.*]] = sext i32 [[TMP68]] to i64 -// CHECK11-NEXT: [[TMP70:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP71:%.*]] = bitcast i8** [[TMP70]] to i32* -// CHECK11-NEXT: store i32 [[TMP65]], i32* [[TMP71]], align 4 -// CHECK11-NEXT: [[TMP72:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 0 +// CHECK11-NEXT: store i32 [[TMP58]], i32* [[N_CASTED16]], align 4 +// CHECK11-NEXT: [[TMP59:%.*]] = load i32, i32* [[N_CASTED16]], align 4 +// CHECK11-NEXT: [[TMP60:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK11-NEXT: [[TMP61:%.*]] = sext i32 [[TMP60]] to i64 +// CHECK11-NEXT: [[TMP62:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP63:%.*]] = bitcast i8** [[TMP62]] to i32* +// CHECK11-NEXT: store i32 [[TMP57]], i32* [[TMP63]], align 4 +// CHECK11-NEXT: [[TMP64:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP65:%.*]] = bitcast i8** [[TMP64]] to i32* +// CHECK11-NEXT: store i32 [[TMP57]], i32* [[TMP65]], align 4 +// CHECK11-NEXT: [[TMP66:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 0 +// CHECK11-NEXT: store i8* null, i8** [[TMP66]], align 4 +// CHECK11-NEXT: [[TMP67:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 1 +// CHECK11-NEXT: [[TMP68:%.*]] = bitcast i8** [[TMP67]] to i32* +// CHECK11-NEXT: store i32 [[TMP59]], i32* [[TMP68]], align 4 +// CHECK11-NEXT: [[TMP69:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 1 +// CHECK11-NEXT: [[TMP70:%.*]] = bitcast i8** [[TMP69]] to i32* +// CHECK11-NEXT: store i32 [[TMP59]], i32* [[TMP70]], align 4 +// CHECK11-NEXT: [[TMP71:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 1 +// CHECK11-NEXT: store i8* null, i8** [[TMP71]], align 4 +// CHECK11-NEXT: [[TMP72:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 2 // CHECK11-NEXT: [[TMP73:%.*]] = bitcast i8** [[TMP72]] to i32* -// CHECK11-NEXT: store i32 [[TMP65]], i32* [[TMP73]], align 4 -// CHECK11-NEXT: [[TMP74:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES21]], i32 0, i32 0 -// CHECK11-NEXT: store i64 4, i64* [[TMP74]], align 4 -// CHECK11-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 0 -// CHECK11-NEXT: store i8* null, i8** [[TMP75]], align 4 -// CHECK11-NEXT: [[TMP76:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 1 -// CHECK11-NEXT: [[TMP77:%.*]] = bitcast i8** [[TMP76]] to i32* -// CHECK11-NEXT: store i32 [[TMP67]], i32* [[TMP77]], align 4 -// CHECK11-NEXT: [[TMP78:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 1 -// CHECK11-NEXT: [[TMP79:%.*]] = bitcast i8** [[TMP78]] to i32* -// CHECK11-NEXT: store i32 [[TMP67]], i32* [[TMP79]], align 4 -// CHECK11-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES21]], i32 0, i32 1 -// CHECK11-NEXT: store i64 4, i64* [[TMP80]], align 4 -// CHECK11-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 1 +// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP73]], align 4 +// CHECK11-NEXT: [[TMP74:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* 
[[DOTOFFLOAD_PTRS18]], i32 0, i32 2 +// CHECK11-NEXT: [[TMP75:%.*]] = bitcast i8** [[TMP74]] to i32* +// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP75]], align 4 +// CHECK11-NEXT: [[TMP76:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 2 +// CHECK11-NEXT: store i8* null, i8** [[TMP76]], align 4 +// CHECK11-NEXT: [[TMP77:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 3 +// CHECK11-NEXT: [[TMP78:%.*]] = bitcast i8** [[TMP77]] to i32** +// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP78]], align 4 +// CHECK11-NEXT: [[TMP79:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 3 +// CHECK11-NEXT: [[TMP80:%.*]] = bitcast i8** [[TMP79]] to i32** +// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP80]], align 4 +// CHECK11-NEXT: store i64 [[TMP61]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 3), align 4 +// CHECK11-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 3 // CHECK11-NEXT: store i8* null, i8** [[TMP81]], align 4 -// CHECK11-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 2 -// CHECK11-NEXT: [[TMP83:%.*]] = bitcast i8** [[TMP82]] to i32* -// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP83]], align 4 -// CHECK11-NEXT: [[TMP84:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 2 -// CHECK11-NEXT: [[TMP85:%.*]] = bitcast i8** [[TMP84]] to i32* -// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP85]], align 4 -// CHECK11-NEXT: [[TMP86:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES21]], i32 0, i32 2 -// CHECK11-NEXT: store i64 4, i64* [[TMP86]], align 4 -// CHECK11-NEXT: [[TMP87:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 2 -// CHECK11-NEXT: store i8* null, i8** [[TMP87]], align 4 -// CHECK11-NEXT: [[TMP88:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 3 -// CHECK11-NEXT: [[TMP89:%.*]] = bitcast i8** [[TMP88]] to i32** -// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP89]], align 4 -// CHECK11-NEXT: [[TMP90:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 3 -// CHECK11-NEXT: [[TMP91:%.*]] = bitcast i8** [[TMP90]] to i32** -// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP91]], align 4 -// CHECK11-NEXT: [[TMP92:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES21]], i32 0, i32 3 -// CHECK11-NEXT: store i64 [[TMP69]], i64* [[TMP92]], align 4 -// CHECK11-NEXT: [[TMP93:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 3 -// CHECK11-NEXT: store i8* null, i8** [[TMP93]], align 4 -// CHECK11-NEXT: [[TMP94:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP95:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP96:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES21]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP97:%.*]] = load i32, i32* [[N]], align 4 -// CHECK11-NEXT: store i32 [[TMP97]], i32* [[DOTCAPTURE_EXPR_23]], align 4 -// CHECK11-NEXT: [[TMP98:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_23]], align 4 -// CHECK11-NEXT: [[SUB25:%.*]] = sub nsw i32 [[TMP98]], 0 -// CHECK11-NEXT: [[DIV26:%.*]] = sdiv i32 [[SUB25]], 1 -// CHECK11-NEXT: [[SUB27:%.*]] = sub nsw i32 
[[DIV26]], 1 -// CHECK11-NEXT: store i32 [[SUB27]], i32* [[DOTCAPTURE_EXPR_24]], align 4 -// CHECK11-NEXT: [[TMP99:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_24]], align 4 -// CHECK11-NEXT: [[ADD28:%.*]] = add nsw i32 [[TMP99]], 1 -// CHECK11-NEXT: [[TMP100:%.*]] = zext i32 [[ADD28]] to i64 -// CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP100]]) -// CHECK11-NEXT: [[TMP101:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l116.region_id, i32 4, i8** [[TMP94]], i8** [[TMP95]], i64* [[TMP96]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.7, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK11-NEXT: [[TMP102:%.*]] = icmp ne i32 [[TMP101]], 0 -// CHECK11-NEXT: br i1 [[TMP102]], label [[OMP_OFFLOAD_FAILED29:%.*]], label [[OMP_OFFLOAD_CONT30:%.*]] -// CHECK11: omp_offload.failed29: -// CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l116(i32 [[TMP65]], i32 [[TMP67]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] -// CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT30]] -// CHECK11: omp_offload.cont30: -// CHECK11-NEXT: [[TMP103:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 -// CHECK11-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP103]]) +// CHECK11-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP83:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP84:%.*]] = load i32, i32* [[N]], align 4 +// CHECK11-NEXT: store i32 [[TMP84]], i32* [[DOTCAPTURE_EXPR_21]], align 4 +// CHECK11-NEXT: [[TMP85:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_21]], align 4 +// CHECK11-NEXT: [[SUB23:%.*]] = sub nsw i32 [[TMP85]], 0 +// CHECK11-NEXT: [[DIV24:%.*]] = sdiv i32 [[SUB23]], 1 +// CHECK11-NEXT: [[SUB25:%.*]] = sub nsw i32 [[DIV24]], 1 +// CHECK11-NEXT: store i32 [[SUB25]], i32* [[DOTCAPTURE_EXPR_22]], align 4 +// CHECK11-NEXT: [[TMP86:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_22]], align 4 +// CHECK11-NEXT: [[ADD26:%.*]] = add nsw i32 [[TMP86]], 1 +// CHECK11-NEXT: [[TMP87:%.*]] = zext i32 [[ADD26]] to i64 +// CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP87]]) +// CHECK11-NEXT: [[TMP88:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l116.region_id, i32 4, i8** [[TMP82]], i8** [[TMP83]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK11-NEXT: [[TMP89:%.*]] = icmp ne i32 [[TMP88]], 0 +// CHECK11-NEXT: br i1 [[TMP89]], label [[OMP_OFFLOAD_FAILED27:%.*]], label [[OMP_OFFLOAD_CONT28:%.*]] +// CHECK11: omp_offload.failed27: +// CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l116(i32 [[TMP57]], i32 [[TMP59]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] +// CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT28]] +// CHECK11: omp_offload.cont28: +// CHECK11-NEXT: [[TMP90:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 +// CHECK11-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP90]]) // CHECK11-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK11-NEXT: [[TMP104:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// 
CHECK11-NEXT: call void @llvm.stackrestore(i8* [[TMP104]]) -// CHECK11-NEXT: [[TMP105:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK11-NEXT: ret i32 [[TMP105]] +// CHECK11-NEXT: [[TMP91:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK11-NEXT: call void @llvm.stackrestore(i8* [[TMP91]]) +// CHECK11-NEXT: [[TMP92:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK11-NEXT: ret i32 [[TMP92]] // // // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l106 @@ -6076,11 +6007,11 @@ // CHECK11-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 // CHECK11-NEXT: store i32 [[TMP3]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK11-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32, i32*, i32)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP4]]) +// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32, i32*, i32)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP4]]) // CHECK11-NEXT: ret void // // -// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..5 +// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..6 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK11-NEXT: entry: // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -6158,7 +6089,7 @@ // CHECK11-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK11-NEXT: store i32 [[TMP19]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK11-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32, i32*, i32)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], i32 [[TMP1]], i32* [[TMP2]], i32 [[TMP20]]) +// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32, i32*, i32)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], i32 [[TMP1]], i32* [[TMP2]], i32 [[TMP20]]) // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK11: omp.inner.for.inc: // CHECK11-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -6200,7 +6131,7 @@ // CHECK11-NEXT: ret void // // -// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..6 +// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..7 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK11-NEXT: entry: // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -6336,7 +6267,7 @@ // CHECK11-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK11-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK11-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l84.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK11-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l84.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK11-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK11-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK11: omp_offload.failed: @@ -6354,7 +6285,7 @@ // CHECK11-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0 // CHECK11-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0 // CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK11-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l89.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.13, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.14, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK11-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l89.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* 
@.offload_sizes.16, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK11-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 // CHECK11-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]] // CHECK11: omp_offload.failed5: @@ -6383,7 +6314,7 @@ // CHECK11-NEXT: [[TMP30:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0 // CHECK11-NEXT: [[TMP31:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0 // CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK11-NEXT: [[TMP32:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l94.region_id, i32 2, i8** [[TMP30]], i8** [[TMP31]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.17, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.18, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK11-NEXT: [[TMP32:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l94.region_id, i32 2, i8** [[TMP30]], i8** [[TMP31]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.20, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.21, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK11-NEXT: [[TMP33:%.*]] = icmp ne i32 [[TMP32]], 0 // CHECK11-NEXT: br i1 [[TMP33]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]] // CHECK11: omp_offload.failed11: @@ -6399,11 +6330,11 @@ // CHECK11-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK11-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 // CHECK11-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 -// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..8 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK11-NEXT: ret void // // -// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..8 +// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..10 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK11-NEXT: entry: // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -6449,7 +6380,7 @@ // CHECK11: omp.inner.for.body: // CHECK11-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 // CHECK11-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 -// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]) +// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]) // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK11: omp.inner.for.inc: // CHECK11-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -6464,7 +6395,7 @@ // CHECK11-NEXT: ret void // // -// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..9 +// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..11 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK11-NEXT: entry: // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -6544,11 +6475,11 @@ // CHECK11-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK11-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 // CHECK11-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 -// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK11-NEXT: ret void // // -// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..14 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK11-NEXT: entry: // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -6594,7 +6525,7 @@ // CHECK11: omp.inner.for.body: // CHECK11-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 // CHECK11-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 -// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..12 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]) +// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]) // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK11: omp.inner.for.inc: // CHECK11-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -6609,7 +6540,7 @@ // CHECK11-NEXT: ret void // // -// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..12 +// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..15 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK11-NEXT: entry: // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -6698,11 +6629,11 @@ // CHECK11-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 // CHECK11-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK11-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..15 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP3]]) +// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..18 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP3]]) // CHECK11-NEXT: ret void // // -// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..15 +// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..18 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK11-NEXT: entry: // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -6754,7 +6685,7 @@ // CHECK11-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK11-NEXT: store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK11-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]]) +// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..19 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]]) // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK11: omp.inner.for.inc: // CHECK11-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -6790,7 +6721,7 @@ // CHECK11-NEXT: ret void // // -// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..16 +// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..19 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK11-NEXT: entry: // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -6887,7 +6818,6 @@ // CHECK12-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK12-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK12-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4 -// CHECK12-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 4 // CHECK12-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK12-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK12-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -6895,19 +6825,17 @@ // CHECK12-NEXT: [[DOTOFFLOAD_BASEPTRS4:%.*]] = alloca [3 x i8*], align 4 // CHECK12-NEXT: [[DOTOFFLOAD_PTRS5:%.*]] = alloca [3 x i8*], align 4 // CHECK12-NEXT: [[DOTOFFLOAD_MAPPERS6:%.*]] = alloca [3 x i8*], align 4 -// CHECK12-NEXT: [[DOTOFFLOAD_SIZES7:%.*]] = alloca [3 x i64], align 4 -// CHECK12-NEXT: [[_TMP8:%.*]] = alloca i32, align 4 +// CHECK12-NEXT: [[_TMP7:%.*]] = alloca i32, align 4 +// CHECK12-NEXT: [[DOTCAPTURE_EXPR_8:%.*]] = alloca i32, align 4 // CHECK12-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4 -// CHECK12-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4 // CHECK12-NEXT: [[M_CASTED:%.*]] = alloca i32, align 4 -// CHECK12-NEXT: [[N_CASTED17:%.*]] = alloca i32, align 4 -// CHECK12-NEXT: [[DOTOFFLOAD_BASEPTRS18:%.*]] = alloca [4 x i8*], align 4 -// CHECK12-NEXT: [[DOTOFFLOAD_PTRS19:%.*]] = alloca [4 x i8*], align 4 -// CHECK12-NEXT: [[DOTOFFLOAD_MAPPERS20:%.*]] = alloca [4 x i8*], align 4 -// CHECK12-NEXT: [[DOTOFFLOAD_SIZES21:%.*]] = alloca [4 x i64], align 4 -// CHECK12-NEXT: [[_TMP22:%.*]] = alloca i32, align 4 -// CHECK12-NEXT: [[DOTCAPTURE_EXPR_23:%.*]] = alloca i32, align 4 -// CHECK12-NEXT: [[DOTCAPTURE_EXPR_24:%.*]] = alloca i32, align 4 +// CHECK12-NEXT: [[N_CASTED16:%.*]] = alloca i32, align 4 +// CHECK12-NEXT: [[DOTOFFLOAD_BASEPTRS17:%.*]] = alloca [4 x i8*], align 4 +// CHECK12-NEXT: [[DOTOFFLOAD_PTRS18:%.*]] = alloca [4 x i8*], align 4 +// CHECK12-NEXT: [[DOTOFFLOAD_MAPPERS19:%.*]] = alloca [4 x i8*], align 4 +// CHECK12-NEXT: [[_TMP20:%.*]] = alloca i32, align 4 +// CHECK12-NEXT: [[DOTCAPTURE_EXPR_21:%.*]] = alloca i32, align 4 +// CHECK12-NEXT: [[DOTCAPTURE_EXPR_22:%.*]] = alloca i32, align 4 // CHECK12-NEXT: store i32 0, i32* [[RETVAL]], align 4 // CHECK12-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 // CHECK12-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 4 @@ -6929,183 +6857,163 @@ // CHECK12-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK12-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i32* // CHECK12-NEXT: store i32 [[TMP3]], 
i32* [[TMP9]], align 4 -// CHECK12-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK12-NEXT: store i64 4, i64* [[TMP10]], align 4 -// CHECK12-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK12-NEXT: store i8* null, i8** [[TMP11]], align 4 -// CHECK12-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK12-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32* -// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP13]], align 4 -// CHECK12-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK12-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32* -// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP15]], align 4 -// CHECK12-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK12-NEXT: store i64 4, i64* [[TMP16]], align 4 -// CHECK12-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK12-NEXT: store i8* null, i8** [[TMP17]], align 4 -// CHECK12-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK12-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK12-NEXT: store i8* null, i8** [[TMP10]], align 4 +// CHECK12-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK12-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i32* +// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP12]], align 4 +// CHECK12-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK12-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i32* +// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP14]], align 4 +// CHECK12-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK12-NEXT: store i8* null, i8** [[TMP15]], align 4 +// CHECK12-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK12-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** +// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 4 +// CHECK12-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK12-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** // CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 4 -// CHECK12-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK12-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** -// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 4 -// CHECK12-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK12-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 4 -// CHECK12-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK12-NEXT: store i8* null, i8** [[TMP23]], align 4 -// CHECK12-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 
0, i32 0 -// CHECK12-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4 -// CHECK12-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK12-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK12-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 +// CHECK12-NEXT: store i64 [[TMP5]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 2), align 4 +// CHECK12-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK12-NEXT: store i8* null, i8** [[TMP20]], align 4 +// CHECK12-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP23:%.*]] = load i32, i32* [[N]], align 4 +// CHECK12-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK12-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK12-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK12-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK12-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK12-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK12-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK12-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1 -// CHECK12-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 -// CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP30]]) -// CHECK12-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l106.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK12-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 -// CHECK12-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK12-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK12-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], 1 +// CHECK12-NEXT: [[TMP26:%.*]] = zext i32 [[ADD]] to i64 +// CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP26]]) +// CHECK12-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l106.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK12-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK12-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK12: omp_offload.failed: // CHECK12-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l106(i32 [[TMP3]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK12-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK12: omp_offload.cont: -// CHECK12-NEXT: [[TMP33:%.*]] = load i32, i32* [[N]], align 4 -// CHECK12-NEXT: store i32 [[TMP33]], i32* [[N_CASTED3]], align 4 -// CHECK12-NEXT: [[TMP34:%.*]] = load i32, i32* [[N_CASTED3]], align 4 -// CHECK12-NEXT: [[TMP35:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK12-NEXT: [[TMP36:%.*]] = sext i32 
[[TMP35]] to i64 -// CHECK12-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i32* -// CHECK12-NEXT: store i32 [[TMP34]], i32* [[TMP38]], align 4 -// CHECK12-NEXT: [[TMP39:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i32* -// CHECK12-NEXT: store i32 [[TMP34]], i32* [[TMP40]], align 4 -// CHECK12-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 0 -// CHECK12-NEXT: store i64 4, i64* [[TMP41]], align 4 -// CHECK12-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP29:%.*]] = load i32, i32* [[N]], align 4 +// CHECK12-NEXT: store i32 [[TMP29]], i32* [[N_CASTED3]], align 4 +// CHECK12-NEXT: [[TMP30:%.*]] = load i32, i32* [[N_CASTED3]], align 4 +// CHECK12-NEXT: [[TMP31:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK12-NEXT: [[TMP32:%.*]] = sext i32 [[TMP31]] to i64 +// CHECK12-NEXT: [[TMP33:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i32* +// CHECK12-NEXT: store i32 [[TMP30]], i32* [[TMP34]], align 4 +// CHECK12-NEXT: [[TMP35:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP36:%.*]] = bitcast i8** [[TMP35]] to i32* +// CHECK12-NEXT: store i32 [[TMP30]], i32* [[TMP36]], align 4 +// CHECK12-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0 +// CHECK12-NEXT: store i8* null, i8** [[TMP37]], align 4 +// CHECK12-NEXT: [[TMP38:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1 +// CHECK12-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i32* +// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP39]], align 4 +// CHECK12-NEXT: [[TMP40:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 1 +// CHECK12-NEXT: [[TMP41:%.*]] = bitcast i8** [[TMP40]] to i32* +// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP41]], align 4 +// CHECK12-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1 // CHECK12-NEXT: store i8* null, i8** [[TMP42]], align 4 -// CHECK12-NEXT: [[TMP43:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1 -// CHECK12-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32* -// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP44]], align 4 -// CHECK12-NEXT: [[TMP45:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 1 -// CHECK12-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32* -// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP46]], align 4 -// CHECK12-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 1 -// CHECK12-NEXT: store i64 4, i64* [[TMP47]], align 4 -// CHECK12-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1 -// CHECK12-NEXT: store i8* null, i8** [[TMP48]], align 4 -// CHECK12-NEXT: [[TMP49:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2 -// CHECK12-NEXT: [[TMP50:%.*]] = bitcast i8** [[TMP49]] to i32** -// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP50]], align 4 -// CHECK12-NEXT: [[TMP51:%.*]] = 
getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 2 -// CHECK12-NEXT: [[TMP52:%.*]] = bitcast i8** [[TMP51]] to i32** -// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP52]], align 4 -// CHECK12-NEXT: [[TMP53:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 2 -// CHECK12-NEXT: store i64 [[TMP36]], i64* [[TMP53]], align 4 -// CHECK12-NEXT: [[TMP54:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 2 -// CHECK12-NEXT: store i8* null, i8** [[TMP54]], align 4 -// CHECK12-NEXT: [[TMP55:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP56:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP57:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP43:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2 +// CHECK12-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32** +// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP44]], align 4 +// CHECK12-NEXT: [[TMP45:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 2 +// CHECK12-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32** +// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP46]], align 4 +// CHECK12-NEXT: store i64 [[TMP32]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 2), align 4 +// CHECK12-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 2 +// CHECK12-NEXT: store i8* null, i8** [[TMP47]], align 4 +// CHECK12-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP49:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP50:%.*]] = load i32, i32* [[N]], align 4 +// CHECK12-NEXT: store i32 [[TMP50]], i32* [[DOTCAPTURE_EXPR_8]], align 4 +// CHECK12-NEXT: [[TMP51:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_8]], align 4 +// CHECK12-NEXT: [[SUB10:%.*]] = sub nsw i32 [[TMP51]], 0 +// CHECK12-NEXT: [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1 +// CHECK12-NEXT: [[SUB12:%.*]] = sub nsw i32 [[DIV11]], 1 +// CHECK12-NEXT: store i32 [[SUB12]], i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK12-NEXT: [[TMP52:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK12-NEXT: [[ADD13:%.*]] = add nsw i32 [[TMP52]], 1 +// CHECK12-NEXT: [[TMP53:%.*]] = zext i32 [[ADD13]] to i64 +// CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP53]]) +// CHECK12-NEXT: [[TMP54:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l111.region_id, i32 3, i8** [[TMP48]], i8** [[TMP49]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK12-NEXT: [[TMP55:%.*]] = icmp ne i32 [[TMP54]], 0 +// CHECK12-NEXT: br i1 [[TMP55]], label [[OMP_OFFLOAD_FAILED14:%.*]], label [[OMP_OFFLOAD_CONT15:%.*]] +// CHECK12: omp_offload.failed14: +// CHECK12-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l111(i32 [[TMP30]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] +// CHECK12-NEXT: br label [[OMP_OFFLOAD_CONT15]] +// CHECK12: 
omp_offload.cont15: +// CHECK12-NEXT: [[TMP56:%.*]] = load i32, i32* [[M]], align 4 +// CHECK12-NEXT: store i32 [[TMP56]], i32* [[M_CASTED]], align 4 +// CHECK12-NEXT: [[TMP57:%.*]] = load i32, i32* [[M_CASTED]], align 4 // CHECK12-NEXT: [[TMP58:%.*]] = load i32, i32* [[N]], align 4 -// CHECK12-NEXT: store i32 [[TMP58]], i32* [[DOTCAPTURE_EXPR_9]], align 4 -// CHECK12-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 -// CHECK12-NEXT: [[SUB11:%.*]] = sub nsw i32 [[TMP59]], 0 -// CHECK12-NEXT: [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1 -// CHECK12-NEXT: [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1 -// CHECK12-NEXT: store i32 [[SUB13]], i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK12-NEXT: [[TMP60:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK12-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP60]], 1 -// CHECK12-NEXT: [[TMP61:%.*]] = zext i32 [[ADD14]] to i64 -// CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP61]]) -// CHECK12-NEXT: [[TMP62:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l111.region_id, i32 3, i8** [[TMP55]], i8** [[TMP56]], i64* [[TMP57]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK12-NEXT: [[TMP63:%.*]] = icmp ne i32 [[TMP62]], 0 -// CHECK12-NEXT: br i1 [[TMP63]], label [[OMP_OFFLOAD_FAILED15:%.*]], label [[OMP_OFFLOAD_CONT16:%.*]] -// CHECK12: omp_offload.failed15: -// CHECK12-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l111(i32 [[TMP34]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] -// CHECK12-NEXT: br label [[OMP_OFFLOAD_CONT16]] -// CHECK12: omp_offload.cont16: -// CHECK12-NEXT: [[TMP64:%.*]] = load i32, i32* [[M]], align 4 -// CHECK12-NEXT: store i32 [[TMP64]], i32* [[M_CASTED]], align 4 -// CHECK12-NEXT: [[TMP65:%.*]] = load i32, i32* [[M_CASTED]], align 4 -// CHECK12-NEXT: [[TMP66:%.*]] = load i32, i32* [[N]], align 4 -// CHECK12-NEXT: store i32 [[TMP66]], i32* [[N_CASTED17]], align 4 -// CHECK12-NEXT: [[TMP67:%.*]] = load i32, i32* [[N_CASTED17]], align 4 -// CHECK12-NEXT: [[TMP68:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK12-NEXT: [[TMP69:%.*]] = sext i32 [[TMP68]] to i64 -// CHECK12-NEXT: [[TMP70:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP71:%.*]] = bitcast i8** [[TMP70]] to i32* -// CHECK12-NEXT: store i32 [[TMP65]], i32* [[TMP71]], align 4 -// CHECK12-NEXT: [[TMP72:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 0 +// CHECK12-NEXT: store i32 [[TMP58]], i32* [[N_CASTED16]], align 4 +// CHECK12-NEXT: [[TMP59:%.*]] = load i32, i32* [[N_CASTED16]], align 4 +// CHECK12-NEXT: [[TMP60:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK12-NEXT: [[TMP61:%.*]] = sext i32 [[TMP60]] to i64 +// CHECK12-NEXT: [[TMP62:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP63:%.*]] = bitcast i8** [[TMP62]] to i32* +// CHECK12-NEXT: store i32 [[TMP57]], i32* [[TMP63]], align 4 +// CHECK12-NEXT: [[TMP64:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP65:%.*]] = bitcast i8** [[TMP64]] to i32* +// CHECK12-NEXT: store i32 [[TMP57]], i32* [[TMP65]], align 4 +// CHECK12-NEXT: [[TMP66:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 0 +// CHECK12-NEXT: store 
i8* null, i8** [[TMP66]], align 4 +// CHECK12-NEXT: [[TMP67:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 1 +// CHECK12-NEXT: [[TMP68:%.*]] = bitcast i8** [[TMP67]] to i32* +// CHECK12-NEXT: store i32 [[TMP59]], i32* [[TMP68]], align 4 +// CHECK12-NEXT: [[TMP69:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 1 +// CHECK12-NEXT: [[TMP70:%.*]] = bitcast i8** [[TMP69]] to i32* +// CHECK12-NEXT: store i32 [[TMP59]], i32* [[TMP70]], align 4 +// CHECK12-NEXT: [[TMP71:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 1 +// CHECK12-NEXT: store i8* null, i8** [[TMP71]], align 4 +// CHECK12-NEXT: [[TMP72:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 2 // CHECK12-NEXT: [[TMP73:%.*]] = bitcast i8** [[TMP72]] to i32* -// CHECK12-NEXT: store i32 [[TMP65]], i32* [[TMP73]], align 4 -// CHECK12-NEXT: [[TMP74:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES21]], i32 0, i32 0 -// CHECK12-NEXT: store i64 4, i64* [[TMP74]], align 4 -// CHECK12-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 0 -// CHECK12-NEXT: store i8* null, i8** [[TMP75]], align 4 -// CHECK12-NEXT: [[TMP76:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 1 -// CHECK12-NEXT: [[TMP77:%.*]] = bitcast i8** [[TMP76]] to i32* -// CHECK12-NEXT: store i32 [[TMP67]], i32* [[TMP77]], align 4 -// CHECK12-NEXT: [[TMP78:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 1 -// CHECK12-NEXT: [[TMP79:%.*]] = bitcast i8** [[TMP78]] to i32* -// CHECK12-NEXT: store i32 [[TMP67]], i32* [[TMP79]], align 4 -// CHECK12-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES21]], i32 0, i32 1 -// CHECK12-NEXT: store i64 4, i64* [[TMP80]], align 4 -// CHECK12-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 1 +// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP73]], align 4 +// CHECK12-NEXT: [[TMP74:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 2 +// CHECK12-NEXT: [[TMP75:%.*]] = bitcast i8** [[TMP74]] to i32* +// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP75]], align 4 +// CHECK12-NEXT: [[TMP76:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 2 +// CHECK12-NEXT: store i8* null, i8** [[TMP76]], align 4 +// CHECK12-NEXT: [[TMP77:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 3 +// CHECK12-NEXT: [[TMP78:%.*]] = bitcast i8** [[TMP77]] to i32** +// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP78]], align 4 +// CHECK12-NEXT: [[TMP79:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 3 +// CHECK12-NEXT: [[TMP80:%.*]] = bitcast i8** [[TMP79]] to i32** +// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP80]], align 4 +// CHECK12-NEXT: store i64 [[TMP61]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 3), align 4 +// CHECK12-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 3 // CHECK12-NEXT: store i8* null, i8** [[TMP81]], align 4 -// CHECK12-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 2 -// CHECK12-NEXT: [[TMP83:%.*]] = bitcast i8** [[TMP82]] to i32* -// CHECK12-NEXT: 
store i32 [[TMP0]], i32* [[TMP83]], align 4 -// CHECK12-NEXT: [[TMP84:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 2 -// CHECK12-NEXT: [[TMP85:%.*]] = bitcast i8** [[TMP84]] to i32* -// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP85]], align 4 -// CHECK12-NEXT: [[TMP86:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES21]], i32 0, i32 2 -// CHECK12-NEXT: store i64 4, i64* [[TMP86]], align 4 -// CHECK12-NEXT: [[TMP87:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 2 -// CHECK12-NEXT: store i8* null, i8** [[TMP87]], align 4 -// CHECK12-NEXT: [[TMP88:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 3 -// CHECK12-NEXT: [[TMP89:%.*]] = bitcast i8** [[TMP88]] to i32** -// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP89]], align 4 -// CHECK12-NEXT: [[TMP90:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 3 -// CHECK12-NEXT: [[TMP91:%.*]] = bitcast i8** [[TMP90]] to i32** -// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP91]], align 4 -// CHECK12-NEXT: [[TMP92:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES21]], i32 0, i32 3 -// CHECK12-NEXT: store i64 [[TMP69]], i64* [[TMP92]], align 4 -// CHECK12-NEXT: [[TMP93:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 3 -// CHECK12-NEXT: store i8* null, i8** [[TMP93]], align 4 -// CHECK12-NEXT: [[TMP94:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP95:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP96:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES21]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP97:%.*]] = load i32, i32* [[N]], align 4 -// CHECK12-NEXT: store i32 [[TMP97]], i32* [[DOTCAPTURE_EXPR_23]], align 4 -// CHECK12-NEXT: [[TMP98:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_23]], align 4 -// CHECK12-NEXT: [[SUB25:%.*]] = sub nsw i32 [[TMP98]], 0 -// CHECK12-NEXT: [[DIV26:%.*]] = sdiv i32 [[SUB25]], 1 -// CHECK12-NEXT: [[SUB27:%.*]] = sub nsw i32 [[DIV26]], 1 -// CHECK12-NEXT: store i32 [[SUB27]], i32* [[DOTCAPTURE_EXPR_24]], align 4 -// CHECK12-NEXT: [[TMP99:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_24]], align 4 -// CHECK12-NEXT: [[ADD28:%.*]] = add nsw i32 [[TMP99]], 1 -// CHECK12-NEXT: [[TMP100:%.*]] = zext i32 [[ADD28]] to i64 -// CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP100]]) -// CHECK12-NEXT: [[TMP101:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l116.region_id, i32 4, i8** [[TMP94]], i8** [[TMP95]], i64* [[TMP96]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.7, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK12-NEXT: [[TMP102:%.*]] = icmp ne i32 [[TMP101]], 0 -// CHECK12-NEXT: br i1 [[TMP102]], label [[OMP_OFFLOAD_FAILED29:%.*]], label [[OMP_OFFLOAD_CONT30:%.*]] -// CHECK12: omp_offload.failed29: -// CHECK12-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l116(i32 [[TMP65]], i32 [[TMP67]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] -// CHECK12-NEXT: br label [[OMP_OFFLOAD_CONT30]] -// CHECK12: omp_offload.cont30: -// CHECK12-NEXT: [[TMP103:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 -// CHECK12-NEXT: [[CALL:%.*]] = call noundef i32 
@_Z5tmainIiLi10EEiT_(i32 noundef [[TMP103]]) +// CHECK12-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP83:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP84:%.*]] = load i32, i32* [[N]], align 4 +// CHECK12-NEXT: store i32 [[TMP84]], i32* [[DOTCAPTURE_EXPR_21]], align 4 +// CHECK12-NEXT: [[TMP85:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_21]], align 4 +// CHECK12-NEXT: [[SUB23:%.*]] = sub nsw i32 [[TMP85]], 0 +// CHECK12-NEXT: [[DIV24:%.*]] = sdiv i32 [[SUB23]], 1 +// CHECK12-NEXT: [[SUB25:%.*]] = sub nsw i32 [[DIV24]], 1 +// CHECK12-NEXT: store i32 [[SUB25]], i32* [[DOTCAPTURE_EXPR_22]], align 4 +// CHECK12-NEXT: [[TMP86:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_22]], align 4 +// CHECK12-NEXT: [[ADD26:%.*]] = add nsw i32 [[TMP86]], 1 +// CHECK12-NEXT: [[TMP87:%.*]] = zext i32 [[ADD26]] to i64 +// CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP87]]) +// CHECK12-NEXT: [[TMP88:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l116.region_id, i32 4, i8** [[TMP82]], i8** [[TMP83]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK12-NEXT: [[TMP89:%.*]] = icmp ne i32 [[TMP88]], 0 +// CHECK12-NEXT: br i1 [[TMP89]], label [[OMP_OFFLOAD_FAILED27:%.*]], label [[OMP_OFFLOAD_CONT28:%.*]] +// CHECK12: omp_offload.failed27: +// CHECK12-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l116(i32 [[TMP57]], i32 [[TMP59]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] +// CHECK12-NEXT: br label [[OMP_OFFLOAD_CONT28]] +// CHECK12: omp_offload.cont28: +// CHECK12-NEXT: [[TMP90:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 +// CHECK12-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP90]]) // CHECK12-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK12-NEXT: [[TMP104:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK12-NEXT: call void @llvm.stackrestore(i8* [[TMP104]]) -// CHECK12-NEXT: [[TMP105:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK12-NEXT: ret i32 [[TMP105]] +// CHECK12-NEXT: [[TMP91:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK12-NEXT: call void @llvm.stackrestore(i8* [[TMP91]]) +// CHECK12-NEXT: [[TMP92:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK12-NEXT: ret i32 [[TMP92]] // // // CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l106 @@ -7540,11 +7448,11 @@ // CHECK12-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 // CHECK12-NEXT: store i32 [[TMP3]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK12-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32, i32*, i32)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP4]]) +// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32, i32*, i32)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP4]]) // CHECK12-NEXT: ret void // // -// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..5 +// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..6 // CHECK12-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK12-NEXT: entry: // CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -7622,7 +7530,7 @@ // CHECK12-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK12-NEXT: store i32 [[TMP19]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK12-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32, i32*, i32)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], i32 [[TMP1]], i32* [[TMP2]], i32 [[TMP20]]) +// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32, i32*, i32)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], i32 [[TMP1]], i32* [[TMP2]], i32 [[TMP20]]) // CHECK12-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK12: omp.inner.for.inc: // CHECK12-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -7664,7 +7572,7 @@ // CHECK12-NEXT: ret void // // -// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..6 +// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..7 // CHECK12-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK12-NEXT: entry: // CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -7800,7 +7708,7 @@ // CHECK12-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK12-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK12-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l84.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK12-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l84.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* 
getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK12-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK12-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK12: omp_offload.failed: @@ -7818,7 +7726,7 @@ // CHECK12-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0 // CHECK12-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0 // CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK12-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l89.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.13, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.14, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK12-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l89.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.16, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK12-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 // CHECK12-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]] // CHECK12: omp_offload.failed5: @@ -7847,7 +7755,7 @@ // CHECK12-NEXT: [[TMP30:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0 // CHECK12-NEXT: [[TMP31:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0 // CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK12-NEXT: [[TMP32:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l94.region_id, i32 2, i8** [[TMP30]], i8** [[TMP31]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.17, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.18, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK12-NEXT: [[TMP32:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l94.region_id, i32 2, i8** [[TMP30]], i8** [[TMP31]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.20, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.21, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK12-NEXT: [[TMP33:%.*]] = icmp ne i32 [[TMP32]], 0 // CHECK12-NEXT: br i1 [[TMP33]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]] // CHECK12: omp_offload.failed11: @@ -7863,11 +7771,11 @@ // CHECK12-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK12-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 // CHECK12-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 -// CHECK12-NEXT: call void (%struct.ident_t*, i32, 
void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..8 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK12-NEXT: ret void // // -// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..8 +// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..10 // CHECK12-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK12-NEXT: entry: // CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -7913,7 +7821,7 @@ // CHECK12: omp.inner.for.body: // CHECK12-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 // CHECK12-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 -// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]) +// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]) // CHECK12-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK12: omp.inner.for.inc: // CHECK12-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -7928,7 +7836,7 @@ // CHECK12-NEXT: ret void // // -// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..9 +// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..11 // CHECK12-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK12-NEXT: entry: // CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -8008,11 +7916,11 @@ // CHECK12-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK12-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 // CHECK12-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 -// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK12-NEXT: ret void // // -// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..14 // CHECK12-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK12-NEXT: entry: // CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -8058,7 +7966,7 @@ // CHECK12: omp.inner.for.body: // CHECK12-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 // CHECK12-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 -// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..12 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]) +// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]) // CHECK12-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK12: omp.inner.for.inc: // CHECK12-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -8073,7 +7981,7 @@ // CHECK12-NEXT: ret void // // -// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..12 +// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..15 // CHECK12-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK12-NEXT: entry: // CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -8162,11 +8070,11 @@ // CHECK12-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 // CHECK12-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK12-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..15 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP3]]) +// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..18 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP3]]) // CHECK12-NEXT: ret void // // -// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..15 +// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..18 // CHECK12-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK12-NEXT: entry: // CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -8218,7 +8126,7 @@ // CHECK12-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK12-NEXT: store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK12-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]]) +// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..19 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]]) // CHECK12-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK12: omp.inner.for.inc: // CHECK12-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -8254,7 +8162,7 @@ // CHECK12-NEXT: ret void // // -// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..16 +// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..19 // CHECK12-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK12-NEXT: entry: // CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 diff --git a/clang/test/OpenMP/teams_distribute_parallel_for_schedule_codegen.cpp b/clang/test/OpenMP/teams_distribute_parallel_for_schedule_codegen.cpp --- a/clang/test/OpenMP/teams_distribute_parallel_for_schedule_codegen.cpp +++ b/clang/test/OpenMP/teams_distribute_parallel_for_schedule_codegen.cpp @@ -7426,7 +7426,6 @@ // CHECK13-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK13-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK13-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8 -// CHECK13-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 8 // CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -7434,36 +7433,32 @@ // CHECK13-NEXT: [[DOTOFFLOAD_BASEPTRS5:%.*]] = alloca [3 x i8*], align 8 // CHECK13-NEXT: [[DOTOFFLOAD_PTRS6:%.*]] = alloca [3 x i8*], align 8 // CHECK13-NEXT: [[DOTOFFLOAD_MAPPERS7:%.*]] = alloca [3 x i8*], align 8 -// CHECK13-NEXT: [[DOTOFFLOAD_SIZES8:%.*]] = alloca [3 x i64], align 8 -// CHECK13-NEXT: [[_TMP9:%.*]] = alloca i32, align 4 +// CHECK13-NEXT: [[_TMP8:%.*]] = alloca i32, align 4 +// CHECK13-NEXT: 
[[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4 -// CHECK13-NEXT: [[DOTCAPTURE_EXPR_11:%.*]] = alloca i32, align 4 // CHECK13-NEXT: [[M_CASTED:%.*]] = alloca i64, align 8 -// CHECK13-NEXT: [[N_CASTED19:%.*]] = alloca i64, align 8 -// CHECK13-NEXT: [[DOTOFFLOAD_BASEPTRS21:%.*]] = alloca [4 x i8*], align 8 -// CHECK13-NEXT: [[DOTOFFLOAD_PTRS22:%.*]] = alloca [4 x i8*], align 8 -// CHECK13-NEXT: [[DOTOFFLOAD_MAPPERS23:%.*]] = alloca [4 x i8*], align 8 -// CHECK13-NEXT: [[DOTOFFLOAD_SIZES24:%.*]] = alloca [4 x i64], align 8 -// CHECK13-NEXT: [[_TMP25:%.*]] = alloca i32, align 4 -// CHECK13-NEXT: [[DOTCAPTURE_EXPR_26:%.*]] = alloca i32, align 4 -// CHECK13-NEXT: [[DOTCAPTURE_EXPR_27:%.*]] = alloca i32, align 4 -// CHECK13-NEXT: [[N_CASTED34:%.*]] = alloca i64, align 8 -// CHECK13-NEXT: [[DOTOFFLOAD_BASEPTRS36:%.*]] = alloca [3 x i8*], align 8 -// CHECK13-NEXT: [[DOTOFFLOAD_PTRS37:%.*]] = alloca [3 x i8*], align 8 -// CHECK13-NEXT: [[DOTOFFLOAD_MAPPERS38:%.*]] = alloca [3 x i8*], align 8 -// CHECK13-NEXT: [[DOTOFFLOAD_SIZES39:%.*]] = alloca [3 x i64], align 8 -// CHECK13-NEXT: [[_TMP40:%.*]] = alloca i32, align 4 -// CHECK13-NEXT: [[DOTCAPTURE_EXPR_41:%.*]] = alloca i32, align 4 -// CHECK13-NEXT: [[DOTCAPTURE_EXPR_42:%.*]] = alloca i32, align 4 -// CHECK13-NEXT: [[M_CASTED49:%.*]] = alloca i64, align 8 -// CHECK13-NEXT: [[N_CASTED51:%.*]] = alloca i64, align 8 -// CHECK13-NEXT: [[DOTOFFLOAD_BASEPTRS53:%.*]] = alloca [4 x i8*], align 8 -// CHECK13-NEXT: [[DOTOFFLOAD_PTRS54:%.*]] = alloca [4 x i8*], align 8 -// CHECK13-NEXT: [[DOTOFFLOAD_MAPPERS55:%.*]] = alloca [4 x i8*], align 8 -// CHECK13-NEXT: [[DOTOFFLOAD_SIZES56:%.*]] = alloca [4 x i64], align 8 -// CHECK13-NEXT: [[_TMP57:%.*]] = alloca i32, align 4 -// CHECK13-NEXT: [[DOTCAPTURE_EXPR_58:%.*]] = alloca i32, align 4 -// CHECK13-NEXT: [[DOTCAPTURE_EXPR_59:%.*]] = alloca i32, align 4 +// CHECK13-NEXT: [[N_CASTED18:%.*]] = alloca i64, align 8 +// CHECK13-NEXT: [[DOTOFFLOAD_BASEPTRS20:%.*]] = alloca [4 x i8*], align 8 +// CHECK13-NEXT: [[DOTOFFLOAD_PTRS21:%.*]] = alloca [4 x i8*], align 8 +// CHECK13-NEXT: [[DOTOFFLOAD_MAPPERS22:%.*]] = alloca [4 x i8*], align 8 +// CHECK13-NEXT: [[_TMP23:%.*]] = alloca i32, align 4 +// CHECK13-NEXT: [[DOTCAPTURE_EXPR_24:%.*]] = alloca i32, align 4 +// CHECK13-NEXT: [[DOTCAPTURE_EXPR_25:%.*]] = alloca i32, align 4 +// CHECK13-NEXT: [[N_CASTED32:%.*]] = alloca i64, align 8 +// CHECK13-NEXT: [[DOTOFFLOAD_BASEPTRS34:%.*]] = alloca [3 x i8*], align 8 +// CHECK13-NEXT: [[DOTOFFLOAD_PTRS35:%.*]] = alloca [3 x i8*], align 8 +// CHECK13-NEXT: [[DOTOFFLOAD_MAPPERS36:%.*]] = alloca [3 x i8*], align 8 +// CHECK13-NEXT: [[_TMP37:%.*]] = alloca i32, align 4 +// CHECK13-NEXT: [[DOTCAPTURE_EXPR_38:%.*]] = alloca i32, align 4 +// CHECK13-NEXT: [[DOTCAPTURE_EXPR_39:%.*]] = alloca i32, align 4 +// CHECK13-NEXT: [[M_CASTED46:%.*]] = alloca i64, align 8 +// CHECK13-NEXT: [[N_CASTED48:%.*]] = alloca i64, align 8 +// CHECK13-NEXT: [[DOTOFFLOAD_BASEPTRS50:%.*]] = alloca [4 x i8*], align 8 +// CHECK13-NEXT: [[DOTOFFLOAD_PTRS51:%.*]] = alloca [4 x i8*], align 8 +// CHECK13-NEXT: [[DOTOFFLOAD_MAPPERS52:%.*]] = alloca [4 x i8*], align 8 +// CHECK13-NEXT: [[_TMP53:%.*]] = alloca i32, align 4 +// CHECK13-NEXT: [[DOTCAPTURE_EXPR_54:%.*]] = alloca i32, align 4 +// CHECK13-NEXT: [[DOTCAPTURE_EXPR_55:%.*]] = alloca i32, align 4 // CHECK13-NEXT: store i32 0, i32* [[RETVAL]], align 4 // CHECK13-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 // CHECK13-NEXT: store i8** 
[[ARGV]], i8*** [[ARGV_ADDR]], align 8 @@ -7486,310 +7481,276 @@ // CHECK13-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK13-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i64* // CHECK13-NEXT: store i64 [[TMP4]], i64* [[TMP9]], align 8 -// CHECK13-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK13-NEXT: store i64 4, i64* [[TMP10]], align 8 -// CHECK13-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK13-NEXT: store i8* null, i8** [[TMP11]], align 8 -// CHECK13-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK13-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64* -// CHECK13-NEXT: store i64 [[TMP1]], i64* [[TMP13]], align 8 -// CHECK13-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK13-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64* -// CHECK13-NEXT: store i64 [[TMP1]], i64* [[TMP15]], align 8 -// CHECK13-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK13-NEXT: store i64 8, i64* [[TMP16]], align 8 -// CHECK13-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK13-NEXT: store i8* null, i8** [[TMP17]], align 8 -// CHECK13-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK13-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK13-NEXT: store i8* null, i8** [[TMP10]], align 8 +// CHECK13-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK13-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i64* +// CHECK13-NEXT: store i64 [[TMP1]], i64* [[TMP12]], align 8 +// CHECK13-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK13-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i64* +// CHECK13-NEXT: store i64 [[TMP1]], i64* [[TMP14]], align 8 +// CHECK13-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK13-NEXT: store i8* null, i8** [[TMP15]], align 8 +// CHECK13-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK13-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** +// CHECK13-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 8 +// CHECK13-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK13-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** // CHECK13-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 8 -// CHECK13-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK13-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** -// CHECK13-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 8 -// CHECK13-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK13-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 8 -// CHECK13-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK13-NEXT: store i8* null, i8** [[TMP23]], align 8 -// CHECK13-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 
x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4 -// CHECK13-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK13-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK13-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 +// CHECK13-NEXT: store i64 [[TMP5]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 2), align 8 +// CHECK13-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK13-NEXT: store i8* null, i8** [[TMP20]], align 8 +// CHECK13-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK13-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK13-NEXT: [[TMP23:%.*]] = load i32, i32* [[N]], align 4 +// CHECK13-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK13-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK13-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK13-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK13-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK13-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK13-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1 -// CHECK13-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 -// CHECK13-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP30]]) -// CHECK13-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l148.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK13-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 -// CHECK13-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK13-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], 1 +// CHECK13-NEXT: [[TMP26:%.*]] = zext i32 [[ADD]] to i64 +// CHECK13-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP26]]) +// CHECK13-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l148.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK13-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK13-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK13: omp_offload.failed: // CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l148(i64 [[TMP4]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK13-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK13: omp_offload.cont: -// CHECK13-NEXT: [[TMP33:%.*]] = 
load i32, i32* [[N]], align 4 +// CHECK13-NEXT: [[TMP29:%.*]] = load i32, i32* [[N]], align 4 // CHECK13-NEXT: [[CONV4:%.*]] = bitcast i64* [[N_CASTED3]] to i32* -// CHECK13-NEXT: store i32 [[TMP33]], i32* [[CONV4]], align 4 -// CHECK13-NEXT: [[TMP34:%.*]] = load i64, i64* [[N_CASTED3]], align 8 -// CHECK13-NEXT: [[TMP35:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK13-NEXT: [[TMP36:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i64* -// CHECK13-NEXT: store i64 [[TMP34]], i64* [[TMP37]], align 8 -// CHECK13-NEXT: [[TMP38:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i64* -// CHECK13-NEXT: store i64 [[TMP34]], i64* [[TMP39]], align 8 -// CHECK13-NEXT: [[TMP40:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 0 -// CHECK13-NEXT: store i64 4, i64* [[TMP40]], align 8 -// CHECK13-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 0 +// CHECK13-NEXT: store i32 [[TMP29]], i32* [[CONV4]], align 4 +// CHECK13-NEXT: [[TMP30:%.*]] = load i64, i64* [[N_CASTED3]], align 8 +// CHECK13-NEXT: [[TMP31:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK13-NEXT: [[TMP32:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 +// CHECK13-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i64* +// CHECK13-NEXT: store i64 [[TMP30]], i64* [[TMP33]], align 8 +// CHECK13-NEXT: [[TMP34:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 +// CHECK13-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i64* +// CHECK13-NEXT: store i64 [[TMP30]], i64* [[TMP35]], align 8 +// CHECK13-NEXT: [[TMP36:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 0 +// CHECK13-NEXT: store i8* null, i8** [[TMP36]], align 8 +// CHECK13-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1 +// CHECK13-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i64* +// CHECK13-NEXT: store i64 [[TMP1]], i64* [[TMP38]], align 8 +// CHECK13-NEXT: [[TMP39:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 1 +// CHECK13-NEXT: [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i64* +// CHECK13-NEXT: store i64 [[TMP1]], i64* [[TMP40]], align 8 +// CHECK13-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 1 // CHECK13-NEXT: store i8* null, i8** [[TMP41]], align 8 -// CHECK13-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1 -// CHECK13-NEXT: [[TMP43:%.*]] = bitcast i8** [[TMP42]] to i64* -// CHECK13-NEXT: store i64 [[TMP1]], i64* [[TMP43]], align 8 -// CHECK13-NEXT: [[TMP44:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 1 -// CHECK13-NEXT: [[TMP45:%.*]] = bitcast i8** [[TMP44]] to i64* -// CHECK13-NEXT: store i64 [[TMP1]], i64* [[TMP45]], align 8 -// CHECK13-NEXT: [[TMP46:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 1 -// CHECK13-NEXT: store i64 8, i64* [[TMP46]], align 8 -// CHECK13-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 1 -// CHECK13-NEXT: store i8* null, i8** [[TMP47]], align 8 -// CHECK13-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x 
i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 2 -// CHECK13-NEXT: [[TMP49:%.*]] = bitcast i8** [[TMP48]] to i32** -// CHECK13-NEXT: store i32* [[VLA]], i32** [[TMP49]], align 8 -// CHECK13-NEXT: [[TMP50:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 2 -// CHECK13-NEXT: [[TMP51:%.*]] = bitcast i8** [[TMP50]] to i32** -// CHECK13-NEXT: store i32* [[VLA]], i32** [[TMP51]], align 8 -// CHECK13-NEXT: [[TMP52:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 2 -// CHECK13-NEXT: store i64 [[TMP35]], i64* [[TMP52]], align 8 -// CHECK13-NEXT: [[TMP53:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 2 -// CHECK13-NEXT: store i8* null, i8** [[TMP53]], align 8 -// CHECK13-NEXT: [[TMP54:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP55:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP56:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 0 +// CHECK13-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 2 +// CHECK13-NEXT: [[TMP43:%.*]] = bitcast i8** [[TMP42]] to i32** +// CHECK13-NEXT: store i32* [[VLA]], i32** [[TMP43]], align 8 +// CHECK13-NEXT: [[TMP44:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 2 +// CHECK13-NEXT: [[TMP45:%.*]] = bitcast i8** [[TMP44]] to i32** +// CHECK13-NEXT: store i32* [[VLA]], i32** [[TMP45]], align 8 +// CHECK13-NEXT: store i64 [[TMP31]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 2), align 8 +// CHECK13-NEXT: [[TMP46:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 2 +// CHECK13-NEXT: store i8* null, i8** [[TMP46]], align 8 +// CHECK13-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 +// CHECK13-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 +// CHECK13-NEXT: [[TMP49:%.*]] = load i32, i32* [[N]], align 4 +// CHECK13-NEXT: store i32 [[TMP49]], i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK13-NEXT: [[TMP50:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK13-NEXT: [[SUB11:%.*]] = sub nsw i32 [[TMP50]], 0 +// CHECK13-NEXT: [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1 +// CHECK13-NEXT: [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1 +// CHECK13-NEXT: store i32 [[SUB13]], i32* [[DOTCAPTURE_EXPR_10]], align 4 +// CHECK13-NEXT: [[TMP51:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 +// CHECK13-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP51]], 1 +// CHECK13-NEXT: [[TMP52:%.*]] = zext i32 [[ADD14]] to i64 +// CHECK13-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP52]]) +// CHECK13-NEXT: [[TMP53:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l153.region_id, i32 3, i8** [[TMP47]], i8** [[TMP48]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK13-NEXT: [[TMP54:%.*]] = icmp ne i32 [[TMP53]], 0 +// CHECK13-NEXT: br i1 [[TMP54]], label [[OMP_OFFLOAD_FAILED15:%.*]], label [[OMP_OFFLOAD_CONT16:%.*]] +// CHECK13: omp_offload.failed15: 
+// CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l153(i64 [[TMP30]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] +// CHECK13-NEXT: br label [[OMP_OFFLOAD_CONT16]] +// CHECK13: omp_offload.cont16: +// CHECK13-NEXT: [[TMP55:%.*]] = load i32, i32* [[M]], align 4 +// CHECK13-NEXT: [[CONV17:%.*]] = bitcast i64* [[M_CASTED]] to i32* +// CHECK13-NEXT: store i32 [[TMP55]], i32* [[CONV17]], align 4 +// CHECK13-NEXT: [[TMP56:%.*]] = load i64, i64* [[M_CASTED]], align 8 // CHECK13-NEXT: [[TMP57:%.*]] = load i32, i32* [[N]], align 4 -// CHECK13-NEXT: store i32 [[TMP57]], i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK13-NEXT: [[TMP58:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK13-NEXT: [[SUB12:%.*]] = sub nsw i32 [[TMP58]], 0 -// CHECK13-NEXT: [[DIV13:%.*]] = sdiv i32 [[SUB12]], 1 -// CHECK13-NEXT: [[SUB14:%.*]] = sub nsw i32 [[DIV13]], 1 -// CHECK13-NEXT: store i32 [[SUB14]], i32* [[DOTCAPTURE_EXPR_11]], align 4 -// CHECK13-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_11]], align 4 -// CHECK13-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP59]], 1 -// CHECK13-NEXT: [[TMP60:%.*]] = zext i32 [[ADD15]] to i64 -// CHECK13-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP60]]) -// CHECK13-NEXT: [[TMP61:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l153.region_id, i32 3, i8** [[TMP54]], i8** [[TMP55]], i64* [[TMP56]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK13-NEXT: [[TMP62:%.*]] = icmp ne i32 [[TMP61]], 0 -// CHECK13-NEXT: br i1 [[TMP62]], label [[OMP_OFFLOAD_FAILED16:%.*]], label [[OMP_OFFLOAD_CONT17:%.*]] -// CHECK13: omp_offload.failed16: -// CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l153(i64 [[TMP34]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] -// CHECK13-NEXT: br label [[OMP_OFFLOAD_CONT17]] -// CHECK13: omp_offload.cont17: -// CHECK13-NEXT: [[TMP63:%.*]] = load i32, i32* [[M]], align 4 -// CHECK13-NEXT: [[CONV18:%.*]] = bitcast i64* [[M_CASTED]] to i32* -// CHECK13-NEXT: store i32 [[TMP63]], i32* [[CONV18]], align 4 -// CHECK13-NEXT: [[TMP64:%.*]] = load i64, i64* [[M_CASTED]], align 8 -// CHECK13-NEXT: [[TMP65:%.*]] = load i32, i32* [[N]], align 4 -// CHECK13-NEXT: [[CONV20:%.*]] = bitcast i64* [[N_CASTED19]] to i32* -// CHECK13-NEXT: store i32 [[TMP65]], i32* [[CONV20]], align 4 -// CHECK13-NEXT: [[TMP66:%.*]] = load i64, i64* [[N_CASTED19]], align 8 -// CHECK13-NEXT: [[TMP67:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK13-NEXT: [[TMP68:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP69:%.*]] = bitcast i8** [[TMP68]] to i64* -// CHECK13-NEXT: store i64 [[TMP64]], i64* [[TMP69]], align 8 -// CHECK13-NEXT: [[TMP70:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0 +// CHECK13-NEXT: [[CONV19:%.*]] = bitcast i64* [[N_CASTED18]] to i32* +// CHECK13-NEXT: store i32 [[TMP57]], i32* [[CONV19]], align 4 +// CHECK13-NEXT: [[TMP58:%.*]] = load i64, i64* [[N_CASTED18]], align 8 +// CHECK13-NEXT: [[TMP59:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK13-NEXT: [[TMP60:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0 +// CHECK13-NEXT: [[TMP61:%.*]] = bitcast i8** [[TMP60]] to i64* +// CHECK13-NEXT: store i64 [[TMP56]], i64* [[TMP61]], align 8 +// CHECK13-NEXT: [[TMP62:%.*]] = 
getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0 +// CHECK13-NEXT: [[TMP63:%.*]] = bitcast i8** [[TMP62]] to i64* +// CHECK13-NEXT: store i64 [[TMP56]], i64* [[TMP63]], align 8 +// CHECK13-NEXT: [[TMP64:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 0 +// CHECK13-NEXT: store i8* null, i8** [[TMP64]], align 8 +// CHECK13-NEXT: [[TMP65:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 1 +// CHECK13-NEXT: [[TMP66:%.*]] = bitcast i8** [[TMP65]] to i64* +// CHECK13-NEXT: store i64 [[TMP58]], i64* [[TMP66]], align 8 +// CHECK13-NEXT: [[TMP67:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 1 +// CHECK13-NEXT: [[TMP68:%.*]] = bitcast i8** [[TMP67]] to i64* +// CHECK13-NEXT: store i64 [[TMP58]], i64* [[TMP68]], align 8 +// CHECK13-NEXT: [[TMP69:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 1 +// CHECK13-NEXT: store i8* null, i8** [[TMP69]], align 8 +// CHECK13-NEXT: [[TMP70:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 2 // CHECK13-NEXT: [[TMP71:%.*]] = bitcast i8** [[TMP70]] to i64* -// CHECK13-NEXT: store i64 [[TMP64]], i64* [[TMP71]], align 8 -// CHECK13-NEXT: [[TMP72:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES24]], i32 0, i32 0 -// CHECK13-NEXT: store i64 4, i64* [[TMP72]], align 8 -// CHECK13-NEXT: [[TMP73:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 0 -// CHECK13-NEXT: store i8* null, i8** [[TMP73]], align 8 -// CHECK13-NEXT: [[TMP74:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 1 -// CHECK13-NEXT: [[TMP75:%.*]] = bitcast i8** [[TMP74]] to i64* -// CHECK13-NEXT: store i64 [[TMP66]], i64* [[TMP75]], align 8 -// CHECK13-NEXT: [[TMP76:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 1 -// CHECK13-NEXT: [[TMP77:%.*]] = bitcast i8** [[TMP76]] to i64* -// CHECK13-NEXT: store i64 [[TMP66]], i64* [[TMP77]], align 8 -// CHECK13-NEXT: [[TMP78:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES24]], i32 0, i32 1 -// CHECK13-NEXT: store i64 4, i64* [[TMP78]], align 8 -// CHECK13-NEXT: [[TMP79:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 1 +// CHECK13-NEXT: store i64 [[TMP1]], i64* [[TMP71]], align 8 +// CHECK13-NEXT: [[TMP72:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 2 +// CHECK13-NEXT: [[TMP73:%.*]] = bitcast i8** [[TMP72]] to i64* +// CHECK13-NEXT: store i64 [[TMP1]], i64* [[TMP73]], align 8 +// CHECK13-NEXT: [[TMP74:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 2 +// CHECK13-NEXT: store i8* null, i8** [[TMP74]], align 8 +// CHECK13-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 3 +// CHECK13-NEXT: [[TMP76:%.*]] = bitcast i8** [[TMP75]] to i32** +// CHECK13-NEXT: store i32* [[VLA]], i32** [[TMP76]], align 8 +// CHECK13-NEXT: [[TMP77:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 3 +// CHECK13-NEXT: [[TMP78:%.*]] = bitcast i8** [[TMP77]] to i32** +// CHECK13-NEXT: store i32* [[VLA]], i32** [[TMP78]], align 8 +// CHECK13-NEXT: store i64 [[TMP59]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 3), align 8 +// CHECK13-NEXT: 
[[TMP79:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 3 // CHECK13-NEXT: store i8* null, i8** [[TMP79]], align 8 -// CHECK13-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 2 -// CHECK13-NEXT: [[TMP81:%.*]] = bitcast i8** [[TMP80]] to i64* -// CHECK13-NEXT: store i64 [[TMP1]], i64* [[TMP81]], align 8 -// CHECK13-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 2 -// CHECK13-NEXT: [[TMP83:%.*]] = bitcast i8** [[TMP82]] to i64* -// CHECK13-NEXT: store i64 [[TMP1]], i64* [[TMP83]], align 8 -// CHECK13-NEXT: [[TMP84:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES24]], i32 0, i32 2 -// CHECK13-NEXT: store i64 8, i64* [[TMP84]], align 8 -// CHECK13-NEXT: [[TMP85:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 2 -// CHECK13-NEXT: store i8* null, i8** [[TMP85]], align 8 -// CHECK13-NEXT: [[TMP86:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 3 -// CHECK13-NEXT: [[TMP87:%.*]] = bitcast i8** [[TMP86]] to i32** -// CHECK13-NEXT: store i32* [[VLA]], i32** [[TMP87]], align 8 -// CHECK13-NEXT: [[TMP88:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 3 -// CHECK13-NEXT: [[TMP89:%.*]] = bitcast i8** [[TMP88]] to i32** -// CHECK13-NEXT: store i32* [[VLA]], i32** [[TMP89]], align 8 -// CHECK13-NEXT: [[TMP90:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES24]], i32 0, i32 3 -// CHECK13-NEXT: store i64 [[TMP67]], i64* [[TMP90]], align 8 -// CHECK13-NEXT: [[TMP91:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 3 -// CHECK13-NEXT: store i8* null, i8** [[TMP91]], align 8 -// CHECK13-NEXT: [[TMP92:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP93:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP94:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES24]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP95:%.*]] = load i32, i32* [[N]], align 4 -// CHECK13-NEXT: store i32 [[TMP95]], i32* [[DOTCAPTURE_EXPR_26]], align 4 -// CHECK13-NEXT: [[TMP96:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_26]], align 4 -// CHECK13-NEXT: [[SUB28:%.*]] = sub nsw i32 [[TMP96]], 0 -// CHECK13-NEXT: [[DIV29:%.*]] = sdiv i32 [[SUB28]], 1 -// CHECK13-NEXT: [[SUB30:%.*]] = sub nsw i32 [[DIV29]], 1 -// CHECK13-NEXT: store i32 [[SUB30]], i32* [[DOTCAPTURE_EXPR_27]], align 4 -// CHECK13-NEXT: [[TMP97:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_27]], align 4 -// CHECK13-NEXT: [[ADD31:%.*]] = add nsw i32 [[TMP97]], 1 -// CHECK13-NEXT: [[TMP98:%.*]] = zext i32 [[ADD31]] to i64 -// CHECK13-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP98]]) -// CHECK13-NEXT: [[TMP99:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l158.region_id, i32 4, i8** [[TMP92]], i8** [[TMP93]], i64* [[TMP94]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.7, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK13-NEXT: [[TMP100:%.*]] = icmp ne i32 [[TMP99]], 0 -// CHECK13-NEXT: br i1 [[TMP100]], label [[OMP_OFFLOAD_FAILED32:%.*]], label [[OMP_OFFLOAD_CONT33:%.*]] -// CHECK13: omp_offload.failed32: -// CHECK13-NEXT: call 
void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l158(i64 [[TMP64]], i64 [[TMP66]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] -// CHECK13-NEXT: br label [[OMP_OFFLOAD_CONT33]] -// CHECK13: omp_offload.cont33: -// CHECK13-NEXT: [[TMP101:%.*]] = load i32, i32* [[N]], align 4 -// CHECK13-NEXT: [[CONV35:%.*]] = bitcast i64* [[N_CASTED34]] to i32* -// CHECK13-NEXT: store i32 [[TMP101]], i32* [[CONV35]], align 4 -// CHECK13-NEXT: [[TMP102:%.*]] = load i64, i64* [[N_CASTED34]], align 8 -// CHECK13-NEXT: [[TMP103:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK13-NEXT: [[TMP104:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS36]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP105:%.*]] = bitcast i8** [[TMP104]] to i64* -// CHECK13-NEXT: store i64 [[TMP102]], i64* [[TMP105]], align 8 -// CHECK13-NEXT: [[TMP106:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS37]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP107:%.*]] = bitcast i8** [[TMP106]] to i64* -// CHECK13-NEXT: store i64 [[TMP102]], i64* [[TMP107]], align 8 -// CHECK13-NEXT: [[TMP108:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES39]], i32 0, i32 0 -// CHECK13-NEXT: store i64 4, i64* [[TMP108]], align 8 -// CHECK13-NEXT: [[TMP109:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS38]], i64 0, i64 0 -// CHECK13-NEXT: store i8* null, i8** [[TMP109]], align 8 -// CHECK13-NEXT: [[TMP110:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS36]], i32 0, i32 1 -// CHECK13-NEXT: [[TMP111:%.*]] = bitcast i8** [[TMP110]] to i64* -// CHECK13-NEXT: store i64 [[TMP1]], i64* [[TMP111]], align 8 -// CHECK13-NEXT: [[TMP112:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS37]], i32 0, i32 1 -// CHECK13-NEXT: [[TMP113:%.*]] = bitcast i8** [[TMP112]] to i64* -// CHECK13-NEXT: store i64 [[TMP1]], i64* [[TMP113]], align 8 -// CHECK13-NEXT: [[TMP114:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES39]], i32 0, i32 1 -// CHECK13-NEXT: store i64 8, i64* [[TMP114]], align 8 -// CHECK13-NEXT: [[TMP115:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS38]], i64 0, i64 1 -// CHECK13-NEXT: store i8* null, i8** [[TMP115]], align 8 -// CHECK13-NEXT: [[TMP116:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS36]], i32 0, i32 2 -// CHECK13-NEXT: [[TMP117:%.*]] = bitcast i8** [[TMP116]] to i32** -// CHECK13-NEXT: store i32* [[VLA]], i32** [[TMP117]], align 8 -// CHECK13-NEXT: [[TMP118:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS37]], i32 0, i32 2 -// CHECK13-NEXT: [[TMP119:%.*]] = bitcast i8** [[TMP118]] to i32** -// CHECK13-NEXT: store i32* [[VLA]], i32** [[TMP119]], align 8 -// CHECK13-NEXT: [[TMP120:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES39]], i32 0, i32 2 -// CHECK13-NEXT: store i64 [[TMP103]], i64* [[TMP120]], align 8 -// CHECK13-NEXT: [[TMP121:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS38]], i64 0, i64 2 -// CHECK13-NEXT: store i8* null, i8** [[TMP121]], align 8 -// CHECK13-NEXT: [[TMP122:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS36]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP123:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS37]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP124:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES39]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP125:%.*]] = load i32, i32* [[N]], align 4 -// CHECK13-NEXT: store i32 
[[TMP125]], i32* [[DOTCAPTURE_EXPR_41]], align 4 -// CHECK13-NEXT: [[TMP126:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_41]], align 4 -// CHECK13-NEXT: [[SUB43:%.*]] = sub nsw i32 [[TMP126]], 0 -// CHECK13-NEXT: [[DIV44:%.*]] = sdiv i32 [[SUB43]], 1 -// CHECK13-NEXT: [[SUB45:%.*]] = sub nsw i32 [[DIV44]], 1 -// CHECK13-NEXT: store i32 [[SUB45]], i32* [[DOTCAPTURE_EXPR_42]], align 4 -// CHECK13-NEXT: [[TMP127:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_42]], align 4 -// CHECK13-NEXT: [[ADD46:%.*]] = add nsw i32 [[TMP127]], 1 -// CHECK13-NEXT: [[TMP128:%.*]] = zext i32 [[ADD46]] to i64 -// CHECK13-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP128]]) -// CHECK13-NEXT: [[TMP129:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l163.region_id, i32 3, i8** [[TMP122]], i8** [[TMP123]], i64* [[TMP124]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK13-NEXT: [[TMP130:%.*]] = icmp ne i32 [[TMP129]], 0 -// CHECK13-NEXT: br i1 [[TMP130]], label [[OMP_OFFLOAD_FAILED47:%.*]], label [[OMP_OFFLOAD_CONT48:%.*]] -// CHECK13: omp_offload.failed47: -// CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l163(i64 [[TMP102]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] -// CHECK13-NEXT: br label [[OMP_OFFLOAD_CONT48]] -// CHECK13: omp_offload.cont48: -// CHECK13-NEXT: [[TMP131:%.*]] = load i32, i32* [[M]], align 4 -// CHECK13-NEXT: [[CONV50:%.*]] = bitcast i64* [[M_CASTED49]] to i32* -// CHECK13-NEXT: store i32 [[TMP131]], i32* [[CONV50]], align 4 -// CHECK13-NEXT: [[TMP132:%.*]] = load i64, i64* [[M_CASTED49]], align 8 -// CHECK13-NEXT: [[TMP133:%.*]] = load i32, i32* [[N]], align 4 -// CHECK13-NEXT: [[CONV52:%.*]] = bitcast i64* [[N_CASTED51]] to i32* -// CHECK13-NEXT: store i32 [[TMP133]], i32* [[CONV52]], align 4 -// CHECK13-NEXT: [[TMP134:%.*]] = load i64, i64* [[N_CASTED51]], align 8 -// CHECK13-NEXT: [[TMP135:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK13-NEXT: [[TMP136:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS53]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP137:%.*]] = bitcast i8** [[TMP136]] to i64* -// CHECK13-NEXT: store i64 [[TMP132]], i64* [[TMP137]], align 8 -// CHECK13-NEXT: [[TMP138:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS54]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP139:%.*]] = bitcast i8** [[TMP138]] to i64* -// CHECK13-NEXT: store i64 [[TMP132]], i64* [[TMP139]], align 8 -// CHECK13-NEXT: [[TMP140:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES56]], i32 0, i32 0 -// CHECK13-NEXT: store i64 4, i64* [[TMP140]], align 8 -// CHECK13-NEXT: [[TMP141:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS55]], i64 0, i64 0 -// CHECK13-NEXT: store i8* null, i8** [[TMP141]], align 8 -// CHECK13-NEXT: [[TMP142:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS53]], i32 0, i32 1 -// CHECK13-NEXT: [[TMP143:%.*]] = bitcast i8** [[TMP142]] to i64* -// CHECK13-NEXT: store i64 [[TMP134]], i64* [[TMP143]], align 8 -// CHECK13-NEXT: [[TMP144:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS54]], i32 0, i32 1 -// CHECK13-NEXT: [[TMP145:%.*]] = bitcast i8** [[TMP144]] to i64* -// CHECK13-NEXT: store i64 [[TMP134]], i64* [[TMP145]], align 8 -// CHECK13-NEXT: [[TMP146:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES56]], i32 0, 
i32 1 -// CHECK13-NEXT: store i64 4, i64* [[TMP146]], align 8 -// CHECK13-NEXT: [[TMP147:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS55]], i64 0, i64 1 -// CHECK13-NEXT: store i8* null, i8** [[TMP147]], align 8 -// CHECK13-NEXT: [[TMP148:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS53]], i32 0, i32 2 -// CHECK13-NEXT: [[TMP149:%.*]] = bitcast i8** [[TMP148]] to i64* -// CHECK13-NEXT: store i64 [[TMP1]], i64* [[TMP149]], align 8 -// CHECK13-NEXT: [[TMP150:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS54]], i32 0, i32 2 -// CHECK13-NEXT: [[TMP151:%.*]] = bitcast i8** [[TMP150]] to i64* -// CHECK13-NEXT: store i64 [[TMP1]], i64* [[TMP151]], align 8 -// CHECK13-NEXT: [[TMP152:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES56]], i32 0, i32 2 -// CHECK13-NEXT: store i64 8, i64* [[TMP152]], align 8 -// CHECK13-NEXT: [[TMP153:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS55]], i64 0, i64 2 -// CHECK13-NEXT: store i8* null, i8** [[TMP153]], align 8 -// CHECK13-NEXT: [[TMP154:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS53]], i32 0, i32 3 -// CHECK13-NEXT: [[TMP155:%.*]] = bitcast i8** [[TMP154]] to i32** -// CHECK13-NEXT: store i32* [[VLA]], i32** [[TMP155]], align 8 -// CHECK13-NEXT: [[TMP156:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS54]], i32 0, i32 3 -// CHECK13-NEXT: [[TMP157:%.*]] = bitcast i8** [[TMP156]] to i32** -// CHECK13-NEXT: store i32* [[VLA]], i32** [[TMP157]], align 8 -// CHECK13-NEXT: [[TMP158:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES56]], i32 0, i32 3 -// CHECK13-NEXT: store i64 [[TMP135]], i64* [[TMP158]], align 8 -// CHECK13-NEXT: [[TMP159:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS55]], i64 0, i64 3 -// CHECK13-NEXT: store i8* null, i8** [[TMP159]], align 8 -// CHECK13-NEXT: [[TMP160:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS53]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP161:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS54]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP162:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES56]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP163:%.*]] = load i32, i32* [[N]], align 4 -// CHECK13-NEXT: store i32 [[TMP163]], i32* [[DOTCAPTURE_EXPR_58]], align 4 -// CHECK13-NEXT: [[TMP164:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_58]], align 4 -// CHECK13-NEXT: [[SUB60:%.*]] = sub nsw i32 [[TMP164]], 0 -// CHECK13-NEXT: [[DIV61:%.*]] = sdiv i32 [[SUB60]], 1 -// CHECK13-NEXT: [[SUB62:%.*]] = sub nsw i32 [[DIV61]], 1 -// CHECK13-NEXT: store i32 [[SUB62]], i32* [[DOTCAPTURE_EXPR_59]], align 4 -// CHECK13-NEXT: [[TMP165:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_59]], align 4 -// CHECK13-NEXT: [[ADD63:%.*]] = add nsw i32 [[TMP165]], 1 -// CHECK13-NEXT: [[TMP166:%.*]] = zext i32 [[ADD63]] to i64 -// CHECK13-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP166]]) -// CHECK13-NEXT: [[TMP167:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l168.region_id, i32 4, i8** [[TMP160]], i8** [[TMP161]], i64* [[TMP162]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK13-NEXT: [[TMP168:%.*]] = icmp ne i32 [[TMP167]], 0 -// CHECK13-NEXT: br i1 [[TMP168]], 
label [[OMP_OFFLOAD_FAILED64:%.*]], label [[OMP_OFFLOAD_CONT65:%.*]] -// CHECK13: omp_offload.failed64: -// CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l168(i64 [[TMP132]], i64 [[TMP134]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] -// CHECK13-NEXT: br label [[OMP_OFFLOAD_CONT65]] -// CHECK13: omp_offload.cont65: -// CHECK13-NEXT: [[TMP169:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 -// CHECK13-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP169]]) +// CHECK13-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0 +// CHECK13-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0 +// CHECK13-NEXT: [[TMP82:%.*]] = load i32, i32* [[N]], align 4 +// CHECK13-NEXT: store i32 [[TMP82]], i32* [[DOTCAPTURE_EXPR_24]], align 4 +// CHECK13-NEXT: [[TMP83:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_24]], align 4 +// CHECK13-NEXT: [[SUB26:%.*]] = sub nsw i32 [[TMP83]], 0 +// CHECK13-NEXT: [[DIV27:%.*]] = sdiv i32 [[SUB26]], 1 +// CHECK13-NEXT: [[SUB28:%.*]] = sub nsw i32 [[DIV27]], 1 +// CHECK13-NEXT: store i32 [[SUB28]], i32* [[DOTCAPTURE_EXPR_25]], align 4 +// CHECK13-NEXT: [[TMP84:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_25]], align 4 +// CHECK13-NEXT: [[ADD29:%.*]] = add nsw i32 [[TMP84]], 1 +// CHECK13-NEXT: [[TMP85:%.*]] = zext i32 [[ADD29]] to i64 +// CHECK13-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP85]]) +// CHECK13-NEXT: [[TMP86:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l158.region_id, i32 4, i8** [[TMP80]], i8** [[TMP81]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK13-NEXT: [[TMP87:%.*]] = icmp ne i32 [[TMP86]], 0 +// CHECK13-NEXT: br i1 [[TMP87]], label [[OMP_OFFLOAD_FAILED30:%.*]], label [[OMP_OFFLOAD_CONT31:%.*]] +// CHECK13: omp_offload.failed30: +// CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l158(i64 [[TMP56]], i64 [[TMP58]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] +// CHECK13-NEXT: br label [[OMP_OFFLOAD_CONT31]] +// CHECK13: omp_offload.cont31: +// CHECK13-NEXT: [[TMP88:%.*]] = load i32, i32* [[N]], align 4 +// CHECK13-NEXT: [[CONV33:%.*]] = bitcast i64* [[N_CASTED32]] to i32* +// CHECK13-NEXT: store i32 [[TMP88]], i32* [[CONV33]], align 4 +// CHECK13-NEXT: [[TMP89:%.*]] = load i64, i64* [[N_CASTED32]], align 8 +// CHECK13-NEXT: [[TMP90:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK13-NEXT: [[TMP91:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 0 +// CHECK13-NEXT: [[TMP92:%.*]] = bitcast i8** [[TMP91]] to i64* +// CHECK13-NEXT: store i64 [[TMP89]], i64* [[TMP92]], align 8 +// CHECK13-NEXT: [[TMP93:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS35]], i32 0, i32 0 +// CHECK13-NEXT: [[TMP94:%.*]] = bitcast i8** [[TMP93]] to i64* +// CHECK13-NEXT: store i64 [[TMP89]], i64* [[TMP94]], align 8 +// CHECK13-NEXT: [[TMP95:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS36]], i64 0, i64 0 +// CHECK13-NEXT: store i8* null, i8** [[TMP95]], align 8 +// CHECK13-NEXT: [[TMP96:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 1 +// CHECK13-NEXT: [[TMP97:%.*]] = bitcast 
i8** [[TMP96]] to i64* +// CHECK13-NEXT: store i64 [[TMP1]], i64* [[TMP97]], align 8 +// CHECK13-NEXT: [[TMP98:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS35]], i32 0, i32 1 +// CHECK13-NEXT: [[TMP99:%.*]] = bitcast i8** [[TMP98]] to i64* +// CHECK13-NEXT: store i64 [[TMP1]], i64* [[TMP99]], align 8 +// CHECK13-NEXT: [[TMP100:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS36]], i64 0, i64 1 +// CHECK13-NEXT: store i8* null, i8** [[TMP100]], align 8 +// CHECK13-NEXT: [[TMP101:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 2 +// CHECK13-NEXT: [[TMP102:%.*]] = bitcast i8** [[TMP101]] to i32** +// CHECK13-NEXT: store i32* [[VLA]], i32** [[TMP102]], align 8 +// CHECK13-NEXT: [[TMP103:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS35]], i32 0, i32 2 +// CHECK13-NEXT: [[TMP104:%.*]] = bitcast i8** [[TMP103]] to i32** +// CHECK13-NEXT: store i32* [[VLA]], i32** [[TMP104]], align 8 +// CHECK13-NEXT: store i64 [[TMP90]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.12, i32 0, i32 2), align 8 +// CHECK13-NEXT: [[TMP105:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS36]], i64 0, i64 2 +// CHECK13-NEXT: store i8* null, i8** [[TMP105]], align 8 +// CHECK13-NEXT: [[TMP106:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 0 +// CHECK13-NEXT: [[TMP107:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS35]], i32 0, i32 0 +// CHECK13-NEXT: [[TMP108:%.*]] = load i32, i32* [[N]], align 4 +// CHECK13-NEXT: store i32 [[TMP108]], i32* [[DOTCAPTURE_EXPR_38]], align 4 +// CHECK13-NEXT: [[TMP109:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_38]], align 4 +// CHECK13-NEXT: [[SUB40:%.*]] = sub nsw i32 [[TMP109]], 0 +// CHECK13-NEXT: [[DIV41:%.*]] = sdiv i32 [[SUB40]], 1 +// CHECK13-NEXT: [[SUB42:%.*]] = sub nsw i32 [[DIV41]], 1 +// CHECK13-NEXT: store i32 [[SUB42]], i32* [[DOTCAPTURE_EXPR_39]], align 4 +// CHECK13-NEXT: [[TMP110:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_39]], align 4 +// CHECK13-NEXT: [[ADD43:%.*]] = add nsw i32 [[TMP110]], 1 +// CHECK13-NEXT: [[TMP111:%.*]] = zext i32 [[ADD43]] to i64 +// CHECK13-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP111]]) +// CHECK13-NEXT: [[TMP112:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l163.region_id, i32 3, i8** [[TMP106]], i8** [[TMP107]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK13-NEXT: [[TMP113:%.*]] = icmp ne i32 [[TMP112]], 0 +// CHECK13-NEXT: br i1 [[TMP113]], label [[OMP_OFFLOAD_FAILED44:%.*]], label [[OMP_OFFLOAD_CONT45:%.*]] +// CHECK13: omp_offload.failed44: +// CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l163(i64 [[TMP89]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] +// CHECK13-NEXT: br label [[OMP_OFFLOAD_CONT45]] +// CHECK13: omp_offload.cont45: +// CHECK13-NEXT: [[TMP114:%.*]] = load i32, i32* [[M]], align 4 +// CHECK13-NEXT: [[CONV47:%.*]] = bitcast i64* [[M_CASTED46]] to i32* +// CHECK13-NEXT: store i32 [[TMP114]], i32* [[CONV47]], align 4 +// CHECK13-NEXT: [[TMP115:%.*]] = load i64, i64* [[M_CASTED46]], align 8 +// CHECK13-NEXT: [[TMP116:%.*]] = load i32, i32* [[N]], align 4 +// CHECK13-NEXT: 
[[CONV49:%.*]] = bitcast i64* [[N_CASTED48]] to i32* +// CHECK13-NEXT: store i32 [[TMP116]], i32* [[CONV49]], align 4 +// CHECK13-NEXT: [[TMP117:%.*]] = load i64, i64* [[N_CASTED48]], align 8 +// CHECK13-NEXT: [[TMP118:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK13-NEXT: [[TMP119:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 0 +// CHECK13-NEXT: [[TMP120:%.*]] = bitcast i8** [[TMP119]] to i64* +// CHECK13-NEXT: store i64 [[TMP115]], i64* [[TMP120]], align 8 +// CHECK13-NEXT: [[TMP121:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 0 +// CHECK13-NEXT: [[TMP122:%.*]] = bitcast i8** [[TMP121]] to i64* +// CHECK13-NEXT: store i64 [[TMP115]], i64* [[TMP122]], align 8 +// CHECK13-NEXT: [[TMP123:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 0 +// CHECK13-NEXT: store i8* null, i8** [[TMP123]], align 8 +// CHECK13-NEXT: [[TMP124:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 1 +// CHECK13-NEXT: [[TMP125:%.*]] = bitcast i8** [[TMP124]] to i64* +// CHECK13-NEXT: store i64 [[TMP117]], i64* [[TMP125]], align 8 +// CHECK13-NEXT: [[TMP126:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 1 +// CHECK13-NEXT: [[TMP127:%.*]] = bitcast i8** [[TMP126]] to i64* +// CHECK13-NEXT: store i64 [[TMP117]], i64* [[TMP127]], align 8 +// CHECK13-NEXT: [[TMP128:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 1 +// CHECK13-NEXT: store i8* null, i8** [[TMP128]], align 8 +// CHECK13-NEXT: [[TMP129:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 2 +// CHECK13-NEXT: [[TMP130:%.*]] = bitcast i8** [[TMP129]] to i64* +// CHECK13-NEXT: store i64 [[TMP1]], i64* [[TMP130]], align 8 +// CHECK13-NEXT: [[TMP131:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 2 +// CHECK13-NEXT: [[TMP132:%.*]] = bitcast i8** [[TMP131]] to i64* +// CHECK13-NEXT: store i64 [[TMP1]], i64* [[TMP132]], align 8 +// CHECK13-NEXT: [[TMP133:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 2 +// CHECK13-NEXT: store i8* null, i8** [[TMP133]], align 8 +// CHECK13-NEXT: [[TMP134:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 3 +// CHECK13-NEXT: [[TMP135:%.*]] = bitcast i8** [[TMP134]] to i32** +// CHECK13-NEXT: store i32* [[VLA]], i32** [[TMP135]], align 8 +// CHECK13-NEXT: [[TMP136:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 3 +// CHECK13-NEXT: [[TMP137:%.*]] = bitcast i8** [[TMP136]] to i32** +// CHECK13-NEXT: store i32* [[VLA]], i32** [[TMP137]], align 8 +// CHECK13-NEXT: store i64 [[TMP118]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.16, i32 0, i32 3), align 8 +// CHECK13-NEXT: [[TMP138:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 3 +// CHECK13-NEXT: store i8* null, i8** [[TMP138]], align 8 +// CHECK13-NEXT: [[TMP139:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 0 +// CHECK13-NEXT: [[TMP140:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 0 +// CHECK13-NEXT: [[TMP141:%.*]] = load i32, i32* [[N]], align 4 +// CHECK13-NEXT: store i32 [[TMP141]], i32* [[DOTCAPTURE_EXPR_54]], align 4 +// CHECK13-NEXT: [[TMP142:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_54]], align 4 
+// CHECK13-NEXT: [[SUB56:%.*]] = sub nsw i32 [[TMP142]], 0 +// CHECK13-NEXT: [[DIV57:%.*]] = sdiv i32 [[SUB56]], 1 +// CHECK13-NEXT: [[SUB58:%.*]] = sub nsw i32 [[DIV57]], 1 +// CHECK13-NEXT: store i32 [[SUB58]], i32* [[DOTCAPTURE_EXPR_55]], align 4 +// CHECK13-NEXT: [[TMP143:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_55]], align 4 +// CHECK13-NEXT: [[ADD59:%.*]] = add nsw i32 [[TMP143]], 1 +// CHECK13-NEXT: [[TMP144:%.*]] = zext i32 [[ADD59]] to i64 +// CHECK13-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP144]]) +// CHECK13-NEXT: [[TMP145:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l168.region_id, i32 4, i8** [[TMP139]], i8** [[TMP140]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.16, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK13-NEXT: [[TMP146:%.*]] = icmp ne i32 [[TMP145]], 0 +// CHECK13-NEXT: br i1 [[TMP146]], label [[OMP_OFFLOAD_FAILED60:%.*]], label [[OMP_OFFLOAD_CONT61:%.*]] +// CHECK13: omp_offload.failed60: +// CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l168(i64 [[TMP115]], i64 [[TMP117]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] +// CHECK13-NEXT: br label [[OMP_OFFLOAD_CONT61]] +// CHECK13: omp_offload.cont61: +// CHECK13-NEXT: [[TMP147:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 +// CHECK13-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP147]]) // CHECK13-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK13-NEXT: [[TMP170:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK13-NEXT: call void @llvm.stackrestore(i8* [[TMP170]]) -// CHECK13-NEXT: [[TMP171:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK13-NEXT: ret i32 [[TMP171]] +// CHECK13-NEXT: [[TMP148:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK13-NEXT: call void @llvm.stackrestore(i8* [[TMP148]]) +// CHECK13-NEXT: [[TMP149:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK13-NEXT: ret i32 [[TMP149]] // // // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l148 @@ -8239,11 +8200,11 @@ // CHECK13-NEXT: [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK13-NEXT: store i32 [[TMP3]], i32* [[CONV2]], align 4 // CHECK13-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i64, i32*, i64)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i32* [[CONV1]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP4]]) +// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i64, i32*, i64)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32* [[CONV1]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP4]]) // CHECK13-NEXT: ret void // // -// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..5 +// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..6 // CHECK13-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK13-NEXT: entry: // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -8325,7 +8286,7 @@ // CHECK13-NEXT: [[CONV7:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK13-NEXT: store i32 [[TMP21]], i32* [[CONV7]], align 4 // CHECK13-NEXT: [[TMP22:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64, i32*, i64)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], i64 [[TMP1]], i32* [[TMP2]], i64 [[TMP22]]) +// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64, i32*, i64)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], i64 [[TMP1]], i32* [[TMP2]], i64 [[TMP22]]) // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK13: omp.inner.for.inc: // CHECK13-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -8367,7 +8328,7 @@ // CHECK13-NEXT: ret void // // -// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..6 +// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..7 // CHECK13-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK13-NEXT: entry: // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -8487,11 +8448,11 @@ // CHECK13-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32* // CHECK13-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8 // CHECK13-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8 -// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i64, i32*)* @.omp_outlined..8 to void (i32*, i32*, ...)*), i32* [[CONV]], i64 [[TMP0]], i32* [[TMP1]]) +// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i64, i32*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), i32* [[CONV]], i64 [[TMP0]], i32* [[TMP1]]) // CHECK13-NEXT: ret void // // -// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..8 +// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..10 // CHECK13-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] { // CHECK13-NEXT: entry: // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -8563,7 +8524,7 @@ // CHECK13-NEXT: [[TMP17:%.*]] = zext i32 [[TMP16]] to i64 // CHECK13-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 // CHECK13-NEXT: [[TMP19:%.*]] = zext i32 [[TMP18]] to i64 -// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64, i32*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), i64 [[TMP17]], i64 [[TMP19]], i32* [[TMP0]], i64 [[TMP1]], i32* [[TMP2]]) +// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64, i32*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP17]], i64 [[TMP19]], i32* [[TMP0]], i64 [[TMP1]], i32* [[TMP2]]) // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK13: omp.inner.for.inc: // CHECK13-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -8582,7 +8543,7 @@ // CHECK13-NEXT: ret void // // -// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..9 +// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..11 // CHECK13-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] { // CHECK13-NEXT: entry: // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -8706,11 +8667,11 @@ // CHECK13-NEXT: [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK13-NEXT: store i32 [[TMP3]], i32* [[CONV2]], align 4 // CHECK13-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i64, i32*, i64)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32* [[CONV1]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP4]]) +// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i64, i32*, i64)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i32* [[CONV1]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP4]]) // CHECK13-NEXT: ret void // // -// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..14 // CHECK13-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK13-NEXT: entry: // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -8790,7 +8751,7 @@ // CHECK13-NEXT: [[CONV7:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK13-NEXT: store i32 [[TMP20]], i32* [[CONV7]], align 4 // CHECK13-NEXT: [[TMP21:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64, i32*, i64)* @.omp_outlined..12 to void (i32*, i32*, ...)*), i64 [[TMP17]], i64 [[TMP19]], i32* [[TMP0]], i64 [[TMP1]], i32* [[TMP2]], i64 [[TMP21]]) +// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64, i32*, i64)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i64 [[TMP17]], i64 [[TMP19]], i32* [[TMP0]], i64 [[TMP1]], i32* [[TMP2]], i64 [[TMP21]]) // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK13: omp.inner.for.inc: // CHECK13-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -8809,7 +8770,7 @@ // CHECK13-NEXT: ret void // // -// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..12 +// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..15 // CHECK13-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK13-NEXT: entry: // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -8955,7 +8916,7 @@ // CHECK13-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK13-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK13-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK13-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK13-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr 
inbounds ([1 x i64], [1 x i64]* @.offload_sizes.20, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.21, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK13-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK13-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK13: omp_offload.failed: @@ -8973,7 +8934,7 @@ // CHECK13-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0 // CHECK13-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0 // CHECK13-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK13-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l121.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.19, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.20, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK13-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l121.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.24, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.25, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK13-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 // CHECK13-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]] // CHECK13: omp_offload.failed5: @@ -9003,7 +8964,7 @@ // CHECK13-NEXT: [[TMP30:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0 // CHECK13-NEXT: [[TMP31:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0 // CHECK13-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK13-NEXT: [[TMP32:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l126.region_id, i32 2, i8** [[TMP30]], i8** [[TMP31]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.23, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.24, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK13-NEXT: [[TMP32:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l126.region_id, i32 2, i8** [[TMP30]], i8** [[TMP31]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.28, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.29, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK13-NEXT: [[TMP33:%.*]] = icmp ne i32 [[TMP32]], 0 // CHECK13-NEXT: br i1 [[TMP33]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]] // CHECK13: omp_offload.failed11: @@ -9021,7 +8982,7 @@ // CHECK13-NEXT: [[TMP39:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 0 // CHECK13-NEXT: [[TMP40:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 0 // CHECK13-NEXT: call void 
@__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK13-NEXT: [[TMP41:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l131.region_id, i32 1, i8** [[TMP39]], i8** [[TMP40]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.27, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.28, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK13-NEXT: [[TMP41:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l131.region_id, i32 1, i8** [[TMP39]], i8** [[TMP40]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.32, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.33, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK13-NEXT: [[TMP42:%.*]] = icmp ne i32 [[TMP41]], 0 // CHECK13-NEXT: br i1 [[TMP42]], label [[OMP_OFFLOAD_FAILED17:%.*]], label [[OMP_OFFLOAD_CONT18:%.*]] // CHECK13: omp_offload.failed17: @@ -9051,7 +9012,7 @@ // CHECK13-NEXT: [[TMP55:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0 // CHECK13-NEXT: [[TMP56:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0 // CHECK13-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK13-NEXT: [[TMP57:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l136.region_id, i32 2, i8** [[TMP55]], i8** [[TMP56]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.31, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.32, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK13-NEXT: [[TMP57:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l136.region_id, i32 2, i8** [[TMP55]], i8** [[TMP56]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.36, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.37, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK13-NEXT: [[TMP58:%.*]] = icmp ne i32 [[TMP57]], 0 // CHECK13-NEXT: br i1 [[TMP58]], label [[OMP_OFFLOAD_FAILED25:%.*]], label [[OMP_OFFLOAD_CONT26:%.*]] // CHECK13: omp_offload.failed25: @@ -9067,11 +9028,11 @@ // CHECK13-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK13-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 // CHECK13-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 -// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK13-NEXT: ret void // // -// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..14 +// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..18 // CHECK13-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK13-NEXT: entry: // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -9119,7 +9080,7 @@ // CHECK13-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 // CHECK13-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 // CHECK13-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]) +// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..19 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]) // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK13: omp.inner.for.inc: // CHECK13-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -9134,7 +9095,7 @@ // CHECK13-NEXT: ret void // // -// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..15 +// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..19 // CHECK13-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK13-NEXT: entry: // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -9217,11 +9178,11 @@ // CHECK13-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK13-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 // CHECK13-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 -// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..17 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..22 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK13-NEXT: ret void // // -// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..17 +// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..22 // CHECK13-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK13-NEXT: entry: // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -9269,7 +9230,7 @@ // CHECK13-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 // CHECK13-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 // CHECK13-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]) +// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..23 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]) // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK13: omp.inner.for.inc: // CHECK13-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -9284,7 +9245,7 @@ // CHECK13-NEXT: ret void // // -// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..18 +// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..23 // CHECK13-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK13-NEXT: entry: // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -9378,11 +9339,11 @@ // CHECK13-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK13-NEXT: store i32 [[TMP2]], i32* [[CONV1]], align 4 // CHECK13-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..21 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP3]]) +// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..26 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP3]]) // CHECK13-NEXT: ret void // // -// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..21 +// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..26 // CHECK13-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK13-NEXT: entry: // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -9438,7 +9399,7 @@ // CHECK13-NEXT: [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK13-NEXT: store i32 [[TMP12]], i32* [[CONV2]], align 4 // CHECK13-NEXT: [[TMP13:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..22 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]]) +// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..27 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]]) // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK13: omp.inner.for.inc: // CHECK13-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -9453,7 +9414,7 @@ // CHECK13-NEXT: ret void // // -// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..22 +// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..27 // CHECK13-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK13-NEXT: entry: // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -9561,11 +9522,11 @@ // CHECK13-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK13-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 // CHECK13-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 -// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..25 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..30 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK13-NEXT: ret void // // -// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..25 +// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..30 // CHECK13-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK13-NEXT: entry: // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -9613,7 +9574,7 @@ // CHECK13-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 // CHECK13-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 // CHECK13-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..26 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]) +// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..31 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]) // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK13: omp.inner.for.inc: // CHECK13-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -9628,7 +9589,7 @@ // CHECK13-NEXT: ret void // // -// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..26 +// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..31 // CHECK13-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK13-NEXT: entry: // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -9720,11 +9681,11 @@ // CHECK13-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK13-NEXT: store i32 [[TMP2]], i32* [[CONV1]], align 4 // CHECK13-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..29 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP3]]) +// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..34 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP3]]) // CHECK13-NEXT: ret void // // -// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..29 +// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..34 // CHECK13-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK13-NEXT: entry: // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -9780,7 +9741,7 @@ // CHECK13-NEXT: [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK13-NEXT: store i32 [[TMP12]], i32* [[CONV2]], align 4 // CHECK13-NEXT: [[TMP13:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..30 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]]) +// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..35 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]]) // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK13: omp.inner.for.inc: // CHECK13-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -9795,7 +9756,7 @@ // CHECK13-NEXT: ret void // // -// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..30 +// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..35 // CHECK13-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK13-NEXT: entry: // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -9895,7 +9856,6 @@ // CHECK14-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK14-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK14-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8 -// CHECK14-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 8 // CHECK14-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK14-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK14-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -9903,36 +9863,32 @@ // CHECK14-NEXT: [[DOTOFFLOAD_BASEPTRS5:%.*]] = alloca [3 x i8*], align 8 // CHECK14-NEXT: [[DOTOFFLOAD_PTRS6:%.*]] = alloca [3 x i8*], align 8 // CHECK14-NEXT: [[DOTOFFLOAD_MAPPERS7:%.*]] = alloca [3 x i8*], align 8 -// CHECK14-NEXT: [[DOTOFFLOAD_SIZES8:%.*]] = alloca [3 x i64], align 8 -// CHECK14-NEXT: [[_TMP9:%.*]] = alloca i32, align 4 +// CHECK14-NEXT: [[_TMP8:%.*]] = alloca i32, align 4 +// CHECK14-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4 // CHECK14-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4 -// CHECK14-NEXT: [[DOTCAPTURE_EXPR_11:%.*]] = alloca i32, align 4 // CHECK14-NEXT: [[M_CASTED:%.*]] = alloca i64, align 8 -// CHECK14-NEXT: [[N_CASTED19:%.*]] = alloca i64, align 8 -// CHECK14-NEXT: [[DOTOFFLOAD_BASEPTRS21:%.*]] 
= alloca [4 x i8*], align 8 -// CHECK14-NEXT: [[DOTOFFLOAD_PTRS22:%.*]] = alloca [4 x i8*], align 8 -// CHECK14-NEXT: [[DOTOFFLOAD_MAPPERS23:%.*]] = alloca [4 x i8*], align 8 -// CHECK14-NEXT: [[DOTOFFLOAD_SIZES24:%.*]] = alloca [4 x i64], align 8 -// CHECK14-NEXT: [[_TMP25:%.*]] = alloca i32, align 4 -// CHECK14-NEXT: [[DOTCAPTURE_EXPR_26:%.*]] = alloca i32, align 4 -// CHECK14-NEXT: [[DOTCAPTURE_EXPR_27:%.*]] = alloca i32, align 4 -// CHECK14-NEXT: [[N_CASTED34:%.*]] = alloca i64, align 8 -// CHECK14-NEXT: [[DOTOFFLOAD_BASEPTRS36:%.*]] = alloca [3 x i8*], align 8 -// CHECK14-NEXT: [[DOTOFFLOAD_PTRS37:%.*]] = alloca [3 x i8*], align 8 -// CHECK14-NEXT: [[DOTOFFLOAD_MAPPERS38:%.*]] = alloca [3 x i8*], align 8 -// CHECK14-NEXT: [[DOTOFFLOAD_SIZES39:%.*]] = alloca [3 x i64], align 8 -// CHECK14-NEXT: [[_TMP40:%.*]] = alloca i32, align 4 -// CHECK14-NEXT: [[DOTCAPTURE_EXPR_41:%.*]] = alloca i32, align 4 -// CHECK14-NEXT: [[DOTCAPTURE_EXPR_42:%.*]] = alloca i32, align 4 -// CHECK14-NEXT: [[M_CASTED49:%.*]] = alloca i64, align 8 -// CHECK14-NEXT: [[N_CASTED51:%.*]] = alloca i64, align 8 -// CHECK14-NEXT: [[DOTOFFLOAD_BASEPTRS53:%.*]] = alloca [4 x i8*], align 8 -// CHECK14-NEXT: [[DOTOFFLOAD_PTRS54:%.*]] = alloca [4 x i8*], align 8 -// CHECK14-NEXT: [[DOTOFFLOAD_MAPPERS55:%.*]] = alloca [4 x i8*], align 8 -// CHECK14-NEXT: [[DOTOFFLOAD_SIZES56:%.*]] = alloca [4 x i64], align 8 -// CHECK14-NEXT: [[_TMP57:%.*]] = alloca i32, align 4 -// CHECK14-NEXT: [[DOTCAPTURE_EXPR_58:%.*]] = alloca i32, align 4 -// CHECK14-NEXT: [[DOTCAPTURE_EXPR_59:%.*]] = alloca i32, align 4 +// CHECK14-NEXT: [[N_CASTED18:%.*]] = alloca i64, align 8 +// CHECK14-NEXT: [[DOTOFFLOAD_BASEPTRS20:%.*]] = alloca [4 x i8*], align 8 +// CHECK14-NEXT: [[DOTOFFLOAD_PTRS21:%.*]] = alloca [4 x i8*], align 8 +// CHECK14-NEXT: [[DOTOFFLOAD_MAPPERS22:%.*]] = alloca [4 x i8*], align 8 +// CHECK14-NEXT: [[_TMP23:%.*]] = alloca i32, align 4 +// CHECK14-NEXT: [[DOTCAPTURE_EXPR_24:%.*]] = alloca i32, align 4 +// CHECK14-NEXT: [[DOTCAPTURE_EXPR_25:%.*]] = alloca i32, align 4 +// CHECK14-NEXT: [[N_CASTED32:%.*]] = alloca i64, align 8 +// CHECK14-NEXT: [[DOTOFFLOAD_BASEPTRS34:%.*]] = alloca [3 x i8*], align 8 +// CHECK14-NEXT: [[DOTOFFLOAD_PTRS35:%.*]] = alloca [3 x i8*], align 8 +// CHECK14-NEXT: [[DOTOFFLOAD_MAPPERS36:%.*]] = alloca [3 x i8*], align 8 +// CHECK14-NEXT: [[_TMP37:%.*]] = alloca i32, align 4 +// CHECK14-NEXT: [[DOTCAPTURE_EXPR_38:%.*]] = alloca i32, align 4 +// CHECK14-NEXT: [[DOTCAPTURE_EXPR_39:%.*]] = alloca i32, align 4 +// CHECK14-NEXT: [[M_CASTED46:%.*]] = alloca i64, align 8 +// CHECK14-NEXT: [[N_CASTED48:%.*]] = alloca i64, align 8 +// CHECK14-NEXT: [[DOTOFFLOAD_BASEPTRS50:%.*]] = alloca [4 x i8*], align 8 +// CHECK14-NEXT: [[DOTOFFLOAD_PTRS51:%.*]] = alloca [4 x i8*], align 8 +// CHECK14-NEXT: [[DOTOFFLOAD_MAPPERS52:%.*]] = alloca [4 x i8*], align 8 +// CHECK14-NEXT: [[_TMP53:%.*]] = alloca i32, align 4 +// CHECK14-NEXT: [[DOTCAPTURE_EXPR_54:%.*]] = alloca i32, align 4 +// CHECK14-NEXT: [[DOTCAPTURE_EXPR_55:%.*]] = alloca i32, align 4 // CHECK14-NEXT: store i32 0, i32* [[RETVAL]], align 4 // CHECK14-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 // CHECK14-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 8 @@ -9955,310 +9911,276 @@ // CHECK14-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK14-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i64* // CHECK14-NEXT: store i64 [[TMP4]], i64* [[TMP9]], align 8 -// CHECK14-NEXT: [[TMP10:%.*]] = getelementptr 
inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK14-NEXT: store i64 4, i64* [[TMP10]], align 8 -// CHECK14-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK14-NEXT: store i8* null, i8** [[TMP11]], align 8 -// CHECK14-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK14-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64* -// CHECK14-NEXT: store i64 [[TMP1]], i64* [[TMP13]], align 8 -// CHECK14-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK14-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64* -// CHECK14-NEXT: store i64 [[TMP1]], i64* [[TMP15]], align 8 -// CHECK14-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK14-NEXT: store i64 8, i64* [[TMP16]], align 8 -// CHECK14-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK14-NEXT: store i8* null, i8** [[TMP17]], align 8 -// CHECK14-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK14-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK14-NEXT: store i8* null, i8** [[TMP10]], align 8 +// CHECK14-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK14-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i64* +// CHECK14-NEXT: store i64 [[TMP1]], i64* [[TMP12]], align 8 +// CHECK14-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK14-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i64* +// CHECK14-NEXT: store i64 [[TMP1]], i64* [[TMP14]], align 8 +// CHECK14-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK14-NEXT: store i8* null, i8** [[TMP15]], align 8 +// CHECK14-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK14-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** +// CHECK14-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 8 +// CHECK14-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK14-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** // CHECK14-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 8 -// CHECK14-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK14-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** -// CHECK14-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 8 -// CHECK14-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK14-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 8 -// CHECK14-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK14-NEXT: store i8* null, i8** [[TMP23]], align 8 -// CHECK14-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK14-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK14-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK14-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 
4 -// CHECK14-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK14-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK14-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 +// CHECK14-NEXT: store i64 [[TMP5]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 2), align 8 +// CHECK14-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK14-NEXT: store i8* null, i8** [[TMP20]], align 8 +// CHECK14-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK14-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK14-NEXT: [[TMP23:%.*]] = load i32, i32* [[N]], align 4 +// CHECK14-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK14-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK14-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK14-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK14-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK14-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK14-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK14-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1 -// CHECK14-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 -// CHECK14-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP30]]) -// CHECK14-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l148.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK14-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 -// CHECK14-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK14-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK14-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], 1 +// CHECK14-NEXT: [[TMP26:%.*]] = zext i32 [[ADD]] to i64 +// CHECK14-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP26]]) +// CHECK14-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l148.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK14-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK14-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK14: omp_offload.failed: // CHECK14-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l148(i64 [[TMP4]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK14-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK14: omp_offload.cont: -// CHECK14-NEXT: [[TMP33:%.*]] = load i32, i32* [[N]], align 4 +// CHECK14-NEXT: [[TMP29:%.*]] = load i32, i32* [[N]], align 4 // CHECK14-NEXT: [[CONV4:%.*]] = bitcast i64* [[N_CASTED3]] to i32* -// CHECK14-NEXT: store i32 [[TMP33]], i32* [[CONV4]], align 4 -// CHECK14-NEXT: [[TMP34:%.*]] = load i64, i64* [[N_CASTED3]], align 8 -// CHECK14-NEXT: [[TMP35:%.*]] = mul nuw i64 
[[TMP1]], 4 -// CHECK14-NEXT: [[TMP36:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 -// CHECK14-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i64* -// CHECK14-NEXT: store i64 [[TMP34]], i64* [[TMP37]], align 8 -// CHECK14-NEXT: [[TMP38:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 -// CHECK14-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i64* -// CHECK14-NEXT: store i64 [[TMP34]], i64* [[TMP39]], align 8 -// CHECK14-NEXT: [[TMP40:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 0 -// CHECK14-NEXT: store i64 4, i64* [[TMP40]], align 8 -// CHECK14-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 0 +// CHECK14-NEXT: store i32 [[TMP29]], i32* [[CONV4]], align 4 +// CHECK14-NEXT: [[TMP30:%.*]] = load i64, i64* [[N_CASTED3]], align 8 +// CHECK14-NEXT: [[TMP31:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK14-NEXT: [[TMP32:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 +// CHECK14-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i64* +// CHECK14-NEXT: store i64 [[TMP30]], i64* [[TMP33]], align 8 +// CHECK14-NEXT: [[TMP34:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 +// CHECK14-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i64* +// CHECK14-NEXT: store i64 [[TMP30]], i64* [[TMP35]], align 8 +// CHECK14-NEXT: [[TMP36:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 0 +// CHECK14-NEXT: store i8* null, i8** [[TMP36]], align 8 +// CHECK14-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1 +// CHECK14-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i64* +// CHECK14-NEXT: store i64 [[TMP1]], i64* [[TMP38]], align 8 +// CHECK14-NEXT: [[TMP39:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 1 +// CHECK14-NEXT: [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i64* +// CHECK14-NEXT: store i64 [[TMP1]], i64* [[TMP40]], align 8 +// CHECK14-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 1 // CHECK14-NEXT: store i8* null, i8** [[TMP41]], align 8 -// CHECK14-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1 -// CHECK14-NEXT: [[TMP43:%.*]] = bitcast i8** [[TMP42]] to i64* -// CHECK14-NEXT: store i64 [[TMP1]], i64* [[TMP43]], align 8 -// CHECK14-NEXT: [[TMP44:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 1 -// CHECK14-NEXT: [[TMP45:%.*]] = bitcast i8** [[TMP44]] to i64* -// CHECK14-NEXT: store i64 [[TMP1]], i64* [[TMP45]], align 8 -// CHECK14-NEXT: [[TMP46:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 1 -// CHECK14-NEXT: store i64 8, i64* [[TMP46]], align 8 -// CHECK14-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 1 -// CHECK14-NEXT: store i8* null, i8** [[TMP47]], align 8 -// CHECK14-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 2 -// CHECK14-NEXT: [[TMP49:%.*]] = bitcast i8** [[TMP48]] to i32** -// CHECK14-NEXT: store i32* [[VLA]], i32** [[TMP49]], align 8 -// CHECK14-NEXT: [[TMP50:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 2 -// CHECK14-NEXT: [[TMP51:%.*]] = bitcast i8** [[TMP50]] to 
i32** -// CHECK14-NEXT: store i32* [[VLA]], i32** [[TMP51]], align 8 -// CHECK14-NEXT: [[TMP52:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 2 -// CHECK14-NEXT: store i64 [[TMP35]], i64* [[TMP52]], align 8 -// CHECK14-NEXT: [[TMP53:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 2 -// CHECK14-NEXT: store i8* null, i8** [[TMP53]], align 8 -// CHECK14-NEXT: [[TMP54:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 -// CHECK14-NEXT: [[TMP55:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 -// CHECK14-NEXT: [[TMP56:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 0 +// CHECK14-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 2 +// CHECK14-NEXT: [[TMP43:%.*]] = bitcast i8** [[TMP42]] to i32** +// CHECK14-NEXT: store i32* [[VLA]], i32** [[TMP43]], align 8 +// CHECK14-NEXT: [[TMP44:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 2 +// CHECK14-NEXT: [[TMP45:%.*]] = bitcast i8** [[TMP44]] to i32** +// CHECK14-NEXT: store i32* [[VLA]], i32** [[TMP45]], align 8 +// CHECK14-NEXT: store i64 [[TMP31]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 2), align 8 +// CHECK14-NEXT: [[TMP46:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 2 +// CHECK14-NEXT: store i8* null, i8** [[TMP46]], align 8 +// CHECK14-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 +// CHECK14-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 +// CHECK14-NEXT: [[TMP49:%.*]] = load i32, i32* [[N]], align 4 +// CHECK14-NEXT: store i32 [[TMP49]], i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK14-NEXT: [[TMP50:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK14-NEXT: [[SUB11:%.*]] = sub nsw i32 [[TMP50]], 0 +// CHECK14-NEXT: [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1 +// CHECK14-NEXT: [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1 +// CHECK14-NEXT: store i32 [[SUB13]], i32* [[DOTCAPTURE_EXPR_10]], align 4 +// CHECK14-NEXT: [[TMP51:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 +// CHECK14-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP51]], 1 +// CHECK14-NEXT: [[TMP52:%.*]] = zext i32 [[ADD14]] to i64 +// CHECK14-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP52]]) +// CHECK14-NEXT: [[TMP53:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l153.region_id, i32 3, i8** [[TMP47]], i8** [[TMP48]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK14-NEXT: [[TMP54:%.*]] = icmp ne i32 [[TMP53]], 0 +// CHECK14-NEXT: br i1 [[TMP54]], label [[OMP_OFFLOAD_FAILED15:%.*]], label [[OMP_OFFLOAD_CONT16:%.*]] +// CHECK14: omp_offload.failed15: +// CHECK14-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l153(i64 [[TMP30]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] +// CHECK14-NEXT: br label [[OMP_OFFLOAD_CONT16]] +// CHECK14: omp_offload.cont16: +// CHECK14-NEXT: [[TMP55:%.*]] = load i32, i32* [[M]], align 4 +// CHECK14-NEXT: [[CONV17:%.*]] = bitcast i64* [[M_CASTED]] to i32* 
+// CHECK14-NEXT: store i32 [[TMP55]], i32* [[CONV17]], align 4 +// CHECK14-NEXT: [[TMP56:%.*]] = load i64, i64* [[M_CASTED]], align 8 // CHECK14-NEXT: [[TMP57:%.*]] = load i32, i32* [[N]], align 4 -// CHECK14-NEXT: store i32 [[TMP57]], i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK14-NEXT: [[TMP58:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK14-NEXT: [[SUB12:%.*]] = sub nsw i32 [[TMP58]], 0 -// CHECK14-NEXT: [[DIV13:%.*]] = sdiv i32 [[SUB12]], 1 -// CHECK14-NEXT: [[SUB14:%.*]] = sub nsw i32 [[DIV13]], 1 -// CHECK14-NEXT: store i32 [[SUB14]], i32* [[DOTCAPTURE_EXPR_11]], align 4 -// CHECK14-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_11]], align 4 -// CHECK14-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP59]], 1 -// CHECK14-NEXT: [[TMP60:%.*]] = zext i32 [[ADD15]] to i64 -// CHECK14-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP60]]) -// CHECK14-NEXT: [[TMP61:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l153.region_id, i32 3, i8** [[TMP54]], i8** [[TMP55]], i64* [[TMP56]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK14-NEXT: [[TMP62:%.*]] = icmp ne i32 [[TMP61]], 0 -// CHECK14-NEXT: br i1 [[TMP62]], label [[OMP_OFFLOAD_FAILED16:%.*]], label [[OMP_OFFLOAD_CONT17:%.*]] -// CHECK14: omp_offload.failed16: -// CHECK14-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l153(i64 [[TMP34]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] -// CHECK14-NEXT: br label [[OMP_OFFLOAD_CONT17]] -// CHECK14: omp_offload.cont17: -// CHECK14-NEXT: [[TMP63:%.*]] = load i32, i32* [[M]], align 4 -// CHECK14-NEXT: [[CONV18:%.*]] = bitcast i64* [[M_CASTED]] to i32* -// CHECK14-NEXT: store i32 [[TMP63]], i32* [[CONV18]], align 4 -// CHECK14-NEXT: [[TMP64:%.*]] = load i64, i64* [[M_CASTED]], align 8 -// CHECK14-NEXT: [[TMP65:%.*]] = load i32, i32* [[N]], align 4 -// CHECK14-NEXT: [[CONV20:%.*]] = bitcast i64* [[N_CASTED19]] to i32* -// CHECK14-NEXT: store i32 [[TMP65]], i32* [[CONV20]], align 4 -// CHECK14-NEXT: [[TMP66:%.*]] = load i64, i64* [[N_CASTED19]], align 8 -// CHECK14-NEXT: [[TMP67:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK14-NEXT: [[TMP68:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0 -// CHECK14-NEXT: [[TMP69:%.*]] = bitcast i8** [[TMP68]] to i64* -// CHECK14-NEXT: store i64 [[TMP64]], i64* [[TMP69]], align 8 -// CHECK14-NEXT: [[TMP70:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0 +// CHECK14-NEXT: [[CONV19:%.*]] = bitcast i64* [[N_CASTED18]] to i32* +// CHECK14-NEXT: store i32 [[TMP57]], i32* [[CONV19]], align 4 +// CHECK14-NEXT: [[TMP58:%.*]] = load i64, i64* [[N_CASTED18]], align 8 +// CHECK14-NEXT: [[TMP59:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK14-NEXT: [[TMP60:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0 +// CHECK14-NEXT: [[TMP61:%.*]] = bitcast i8** [[TMP60]] to i64* +// CHECK14-NEXT: store i64 [[TMP56]], i64* [[TMP61]], align 8 +// CHECK14-NEXT: [[TMP62:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0 +// CHECK14-NEXT: [[TMP63:%.*]] = bitcast i8** [[TMP62]] to i64* +// CHECK14-NEXT: store i64 [[TMP56]], i64* [[TMP63]], align 8 +// CHECK14-NEXT: [[TMP64:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 0 +// CHECK14-NEXT: store i8* 
null, i8** [[TMP64]], align 8 +// CHECK14-NEXT: [[TMP65:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 1 +// CHECK14-NEXT: [[TMP66:%.*]] = bitcast i8** [[TMP65]] to i64* +// CHECK14-NEXT: store i64 [[TMP58]], i64* [[TMP66]], align 8 +// CHECK14-NEXT: [[TMP67:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 1 +// CHECK14-NEXT: [[TMP68:%.*]] = bitcast i8** [[TMP67]] to i64* +// CHECK14-NEXT: store i64 [[TMP58]], i64* [[TMP68]], align 8 +// CHECK14-NEXT: [[TMP69:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 1 +// CHECK14-NEXT: store i8* null, i8** [[TMP69]], align 8 +// CHECK14-NEXT: [[TMP70:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 2 // CHECK14-NEXT: [[TMP71:%.*]] = bitcast i8** [[TMP70]] to i64* -// CHECK14-NEXT: store i64 [[TMP64]], i64* [[TMP71]], align 8 -// CHECK14-NEXT: [[TMP72:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES24]], i32 0, i32 0 -// CHECK14-NEXT: store i64 4, i64* [[TMP72]], align 8 -// CHECK14-NEXT: [[TMP73:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 0 -// CHECK14-NEXT: store i8* null, i8** [[TMP73]], align 8 -// CHECK14-NEXT: [[TMP74:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 1 -// CHECK14-NEXT: [[TMP75:%.*]] = bitcast i8** [[TMP74]] to i64* -// CHECK14-NEXT: store i64 [[TMP66]], i64* [[TMP75]], align 8 -// CHECK14-NEXT: [[TMP76:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 1 -// CHECK14-NEXT: [[TMP77:%.*]] = bitcast i8** [[TMP76]] to i64* -// CHECK14-NEXT: store i64 [[TMP66]], i64* [[TMP77]], align 8 -// CHECK14-NEXT: [[TMP78:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES24]], i32 0, i32 1 -// CHECK14-NEXT: store i64 4, i64* [[TMP78]], align 8 -// CHECK14-NEXT: [[TMP79:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 1 +// CHECK14-NEXT: store i64 [[TMP1]], i64* [[TMP71]], align 8 +// CHECK14-NEXT: [[TMP72:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 2 +// CHECK14-NEXT: [[TMP73:%.*]] = bitcast i8** [[TMP72]] to i64* +// CHECK14-NEXT: store i64 [[TMP1]], i64* [[TMP73]], align 8 +// CHECK14-NEXT: [[TMP74:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 2 +// CHECK14-NEXT: store i8* null, i8** [[TMP74]], align 8 +// CHECK14-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 3 +// CHECK14-NEXT: [[TMP76:%.*]] = bitcast i8** [[TMP75]] to i32** +// CHECK14-NEXT: store i32* [[VLA]], i32** [[TMP76]], align 8 +// CHECK14-NEXT: [[TMP77:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 3 +// CHECK14-NEXT: [[TMP78:%.*]] = bitcast i8** [[TMP77]] to i32** +// CHECK14-NEXT: store i32* [[VLA]], i32** [[TMP78]], align 8 +// CHECK14-NEXT: store i64 [[TMP59]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 3), align 8 +// CHECK14-NEXT: [[TMP79:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 3 // CHECK14-NEXT: store i8* null, i8** [[TMP79]], align 8 -// CHECK14-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 2 -// CHECK14-NEXT: [[TMP81:%.*]] = bitcast i8** [[TMP80]] to i64* -// CHECK14-NEXT: store 
i64 [[TMP1]], i64* [[TMP81]], align 8 -// CHECK14-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 2 -// CHECK14-NEXT: [[TMP83:%.*]] = bitcast i8** [[TMP82]] to i64* -// CHECK14-NEXT: store i64 [[TMP1]], i64* [[TMP83]], align 8 -// CHECK14-NEXT: [[TMP84:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES24]], i32 0, i32 2 -// CHECK14-NEXT: store i64 8, i64* [[TMP84]], align 8 -// CHECK14-NEXT: [[TMP85:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 2 -// CHECK14-NEXT: store i8* null, i8** [[TMP85]], align 8 -// CHECK14-NEXT: [[TMP86:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 3 -// CHECK14-NEXT: [[TMP87:%.*]] = bitcast i8** [[TMP86]] to i32** -// CHECK14-NEXT: store i32* [[VLA]], i32** [[TMP87]], align 8 -// CHECK14-NEXT: [[TMP88:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 3 -// CHECK14-NEXT: [[TMP89:%.*]] = bitcast i8** [[TMP88]] to i32** -// CHECK14-NEXT: store i32* [[VLA]], i32** [[TMP89]], align 8 -// CHECK14-NEXT: [[TMP90:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES24]], i32 0, i32 3 -// CHECK14-NEXT: store i64 [[TMP67]], i64* [[TMP90]], align 8 -// CHECK14-NEXT: [[TMP91:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 3 -// CHECK14-NEXT: store i8* null, i8** [[TMP91]], align 8 -// CHECK14-NEXT: [[TMP92:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0 -// CHECK14-NEXT: [[TMP93:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0 -// CHECK14-NEXT: [[TMP94:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES24]], i32 0, i32 0 -// CHECK14-NEXT: [[TMP95:%.*]] = load i32, i32* [[N]], align 4 -// CHECK14-NEXT: store i32 [[TMP95]], i32* [[DOTCAPTURE_EXPR_26]], align 4 -// CHECK14-NEXT: [[TMP96:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_26]], align 4 -// CHECK14-NEXT: [[SUB28:%.*]] = sub nsw i32 [[TMP96]], 0 -// CHECK14-NEXT: [[DIV29:%.*]] = sdiv i32 [[SUB28]], 1 -// CHECK14-NEXT: [[SUB30:%.*]] = sub nsw i32 [[DIV29]], 1 -// CHECK14-NEXT: store i32 [[SUB30]], i32* [[DOTCAPTURE_EXPR_27]], align 4 -// CHECK14-NEXT: [[TMP97:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_27]], align 4 -// CHECK14-NEXT: [[ADD31:%.*]] = add nsw i32 [[TMP97]], 1 -// CHECK14-NEXT: [[TMP98:%.*]] = zext i32 [[ADD31]] to i64 -// CHECK14-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP98]]) -// CHECK14-NEXT: [[TMP99:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l158.region_id, i32 4, i8** [[TMP92]], i8** [[TMP93]], i64* [[TMP94]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.7, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK14-NEXT: [[TMP100:%.*]] = icmp ne i32 [[TMP99]], 0 -// CHECK14-NEXT: br i1 [[TMP100]], label [[OMP_OFFLOAD_FAILED32:%.*]], label [[OMP_OFFLOAD_CONT33:%.*]] -// CHECK14: omp_offload.failed32: -// CHECK14-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l158(i64 [[TMP64]], i64 [[TMP66]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] -// CHECK14-NEXT: br label [[OMP_OFFLOAD_CONT33]] -// CHECK14: omp_offload.cont33: -// CHECK14-NEXT: [[TMP101:%.*]] = load i32, i32* [[N]], align 4 -// CHECK14-NEXT: [[CONV35:%.*]] = bitcast i64* [[N_CASTED34]] to i32* -// CHECK14-NEXT: 
store i32 [[TMP101]], i32* [[CONV35]], align 4 -// CHECK14-NEXT: [[TMP102:%.*]] = load i64, i64* [[N_CASTED34]], align 8 -// CHECK14-NEXT: [[TMP103:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK14-NEXT: [[TMP104:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS36]], i32 0, i32 0 -// CHECK14-NEXT: [[TMP105:%.*]] = bitcast i8** [[TMP104]] to i64* -// CHECK14-NEXT: store i64 [[TMP102]], i64* [[TMP105]], align 8 -// CHECK14-NEXT: [[TMP106:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS37]], i32 0, i32 0 -// CHECK14-NEXT: [[TMP107:%.*]] = bitcast i8** [[TMP106]] to i64* -// CHECK14-NEXT: store i64 [[TMP102]], i64* [[TMP107]], align 8 -// CHECK14-NEXT: [[TMP108:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES39]], i32 0, i32 0 -// CHECK14-NEXT: store i64 4, i64* [[TMP108]], align 8 -// CHECK14-NEXT: [[TMP109:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS38]], i64 0, i64 0 -// CHECK14-NEXT: store i8* null, i8** [[TMP109]], align 8 -// CHECK14-NEXT: [[TMP110:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS36]], i32 0, i32 1 -// CHECK14-NEXT: [[TMP111:%.*]] = bitcast i8** [[TMP110]] to i64* -// CHECK14-NEXT: store i64 [[TMP1]], i64* [[TMP111]], align 8 -// CHECK14-NEXT: [[TMP112:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS37]], i32 0, i32 1 -// CHECK14-NEXT: [[TMP113:%.*]] = bitcast i8** [[TMP112]] to i64* -// CHECK14-NEXT: store i64 [[TMP1]], i64* [[TMP113]], align 8 -// CHECK14-NEXT: [[TMP114:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES39]], i32 0, i32 1 -// CHECK14-NEXT: store i64 8, i64* [[TMP114]], align 8 -// CHECK14-NEXT: [[TMP115:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS38]], i64 0, i64 1 -// CHECK14-NEXT: store i8* null, i8** [[TMP115]], align 8 -// CHECK14-NEXT: [[TMP116:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS36]], i32 0, i32 2 -// CHECK14-NEXT: [[TMP117:%.*]] = bitcast i8** [[TMP116]] to i32** -// CHECK14-NEXT: store i32* [[VLA]], i32** [[TMP117]], align 8 -// CHECK14-NEXT: [[TMP118:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS37]], i32 0, i32 2 -// CHECK14-NEXT: [[TMP119:%.*]] = bitcast i8** [[TMP118]] to i32** -// CHECK14-NEXT: store i32* [[VLA]], i32** [[TMP119]], align 8 -// CHECK14-NEXT: [[TMP120:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES39]], i32 0, i32 2 -// CHECK14-NEXT: store i64 [[TMP103]], i64* [[TMP120]], align 8 -// CHECK14-NEXT: [[TMP121:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS38]], i64 0, i64 2 -// CHECK14-NEXT: store i8* null, i8** [[TMP121]], align 8 -// CHECK14-NEXT: [[TMP122:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS36]], i32 0, i32 0 -// CHECK14-NEXT: [[TMP123:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS37]], i32 0, i32 0 -// CHECK14-NEXT: [[TMP124:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES39]], i32 0, i32 0 -// CHECK14-NEXT: [[TMP125:%.*]] = load i32, i32* [[N]], align 4 -// CHECK14-NEXT: store i32 [[TMP125]], i32* [[DOTCAPTURE_EXPR_41]], align 4 -// CHECK14-NEXT: [[TMP126:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_41]], align 4 -// CHECK14-NEXT: [[SUB43:%.*]] = sub nsw i32 [[TMP126]], 0 -// CHECK14-NEXT: [[DIV44:%.*]] = sdiv i32 [[SUB43]], 1 -// CHECK14-NEXT: [[SUB45:%.*]] = sub nsw i32 [[DIV44]], 1 -// CHECK14-NEXT: store i32 [[SUB45]], i32* 
[[DOTCAPTURE_EXPR_42]], align 4
-// CHECK14-NEXT: [[TMP127:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_42]], align 4
-// CHECK14-NEXT: [[ADD46:%.*]] = add nsw i32 [[TMP127]], 1
-// CHECK14-NEXT: [[TMP128:%.*]] = zext i32 [[ADD46]] to i64
-// CHECK14-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP128]])
-// CHECK14-NEXT: [[TMP129:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l163.region_id, i32 3, i8** [[TMP122]], i8** [[TMP123]], i64* [[TMP124]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
-// CHECK14-NEXT: [[TMP130:%.*]] = icmp ne i32 [[TMP129]], 0
-// CHECK14-NEXT: br i1 [[TMP130]], label [[OMP_OFFLOAD_FAILED47:%.*]], label [[OMP_OFFLOAD_CONT48:%.*]]
-// CHECK14: omp_offload.failed47:
-// CHECK14-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l163(i64 [[TMP102]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]]
-// CHECK14-NEXT: br label [[OMP_OFFLOAD_CONT48]]
-// CHECK14: omp_offload.cont48:
-// CHECK14-NEXT: [[TMP131:%.*]] = load i32, i32* [[M]], align 4
-// CHECK14-NEXT: [[CONV50:%.*]] = bitcast i64* [[M_CASTED49]] to i32*
-// CHECK14-NEXT: store i32 [[TMP131]], i32* [[CONV50]], align 4
-// CHECK14-NEXT: [[TMP132:%.*]] = load i64, i64* [[M_CASTED49]], align 8
-// CHECK14-NEXT: [[TMP133:%.*]] = load i32, i32* [[N]], align 4
-// CHECK14-NEXT: [[CONV52:%.*]] = bitcast i64* [[N_CASTED51]] to i32*
-// CHECK14-NEXT: store i32 [[TMP133]], i32* [[CONV52]], align 4
-// CHECK14-NEXT: [[TMP134:%.*]] = load i64, i64* [[N_CASTED51]], align 8
-// CHECK14-NEXT: [[TMP135:%.*]] = mul nuw i64 [[TMP1]], 4
-// CHECK14-NEXT: [[TMP136:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS53]], i32 0, i32 0
-// CHECK14-NEXT: [[TMP137:%.*]] = bitcast i8** [[TMP136]] to i64*
-// CHECK14-NEXT: store i64 [[TMP132]], i64* [[TMP137]], align 8
-// CHECK14-NEXT: [[TMP138:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS54]], i32 0, i32 0
-// CHECK14-NEXT: [[TMP139:%.*]] = bitcast i8** [[TMP138]] to i64*
-// CHECK14-NEXT: store i64 [[TMP132]], i64* [[TMP139]], align 8
-// CHECK14-NEXT: [[TMP140:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES56]], i32 0, i32 0
-// CHECK14-NEXT: store i64 4, i64* [[TMP140]], align 8
-// CHECK14-NEXT: [[TMP141:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS55]], i64 0, i64 0
-// CHECK14-NEXT: store i8* null, i8** [[TMP141]], align 8
-// CHECK14-NEXT: [[TMP142:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS53]], i32 0, i32 1
-// CHECK14-NEXT: [[TMP143:%.*]] = bitcast i8** [[TMP142]] to i64*
-// CHECK14-NEXT: store i64 [[TMP134]], i64* [[TMP143]], align 8
-// CHECK14-NEXT: [[TMP144:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS54]], i32 0, i32 1
-// CHECK14-NEXT: [[TMP145:%.*]] = bitcast i8** [[TMP144]] to i64*
-// CHECK14-NEXT: store i64 [[TMP134]], i64* [[TMP145]], align 8
-// CHECK14-NEXT: [[TMP146:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES56]], i32 0, i32 1
-// CHECK14-NEXT: store i64 4, i64* [[TMP146]], align 8
-// CHECK14-NEXT: [[TMP147:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS55]], i64 0, i64 1
-// CHECK14-NEXT: store i8* null, i8** [[TMP147]], align 8
-// CHECK14-NEXT: [[TMP148:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS53]], i32 0, i32 2
-// CHECK14-NEXT: [[TMP149:%.*]] = bitcast i8** [[TMP148]] to i64*
-// CHECK14-NEXT: store i64 [[TMP1]], i64* [[TMP149]], align 8
-// CHECK14-NEXT: [[TMP150:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS54]], i32 0, i32 2
-// CHECK14-NEXT: [[TMP151:%.*]] = bitcast i8** [[TMP150]] to i64*
-// CHECK14-NEXT: store i64 [[TMP1]], i64* [[TMP151]], align 8
-// CHECK14-NEXT: [[TMP152:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES56]], i32 0, i32 2
-// CHECK14-NEXT: store i64 8, i64* [[TMP152]], align 8
-// CHECK14-NEXT: [[TMP153:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS55]], i64 0, i64 2
-// CHECK14-NEXT: store i8* null, i8** [[TMP153]], align 8
-// CHECK14-NEXT: [[TMP154:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS53]], i32 0, i32 3
-// CHECK14-NEXT: [[TMP155:%.*]] = bitcast i8** [[TMP154]] to i32**
-// CHECK14-NEXT: store i32* [[VLA]], i32** [[TMP155]], align 8
-// CHECK14-NEXT: [[TMP156:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS54]], i32 0, i32 3
-// CHECK14-NEXT: [[TMP157:%.*]] = bitcast i8** [[TMP156]] to i32**
-// CHECK14-NEXT: store i32* [[VLA]], i32** [[TMP157]], align 8
-// CHECK14-NEXT: [[TMP158:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES56]], i32 0, i32 3
-// CHECK14-NEXT: store i64 [[TMP135]], i64* [[TMP158]], align 8
-// CHECK14-NEXT: [[TMP159:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS55]], i64 0, i64 3
-// CHECK14-NEXT: store i8* null, i8** [[TMP159]], align 8
-// CHECK14-NEXT: [[TMP160:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS53]], i32 0, i32 0
-// CHECK14-NEXT: [[TMP161:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS54]], i32 0, i32 0
-// CHECK14-NEXT: [[TMP162:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES56]], i32 0, i32 0
-// CHECK14-NEXT: [[TMP163:%.*]] = load i32, i32* [[N]], align 4
-// CHECK14-NEXT: store i32 [[TMP163]], i32* [[DOTCAPTURE_EXPR_58]], align 4
-// CHECK14-NEXT: [[TMP164:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_58]], align 4
-// CHECK14-NEXT: [[SUB60:%.*]] = sub nsw i32 [[TMP164]], 0
-// CHECK14-NEXT: [[DIV61:%.*]] = sdiv i32 [[SUB60]], 1
-// CHECK14-NEXT: [[SUB62:%.*]] = sub nsw i32 [[DIV61]], 1
-// CHECK14-NEXT: store i32 [[SUB62]], i32* [[DOTCAPTURE_EXPR_59]], align 4
-// CHECK14-NEXT: [[TMP165:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_59]], align 4
-// CHECK14-NEXT: [[ADD63:%.*]] = add nsw i32 [[TMP165]], 1
-// CHECK14-NEXT: [[TMP166:%.*]] = zext i32 [[ADD63]] to i64
-// CHECK14-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP166]])
-// CHECK14-NEXT: [[TMP167:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l168.region_id, i32 4, i8** [[TMP160]], i8** [[TMP161]], i64* [[TMP162]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
-// CHECK14-NEXT: [[TMP168:%.*]] = icmp ne i32 [[TMP167]], 0
-// CHECK14-NEXT: br i1 [[TMP168]], label [[OMP_OFFLOAD_FAILED64:%.*]], label [[OMP_OFFLOAD_CONT65:%.*]]
-// CHECK14: omp_offload.failed64:
-// CHECK14-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l168(i64 [[TMP132]], i64 [[TMP134]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]]
-// CHECK14-NEXT: br label [[OMP_OFFLOAD_CONT65]]
-// CHECK14: omp_offload.cont65:
-// CHECK14-NEXT: [[TMP169:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4
-// CHECK14-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP169]])
+// CHECK14-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0
+// CHECK14-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0
+// CHECK14-NEXT: [[TMP82:%.*]] = load i32, i32* [[N]], align 4
+// CHECK14-NEXT: store i32 [[TMP82]], i32* [[DOTCAPTURE_EXPR_24]], align 4
+// CHECK14-NEXT: [[TMP83:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_24]], align 4
+// CHECK14-NEXT: [[SUB26:%.*]] = sub nsw i32 [[TMP83]], 0
+// CHECK14-NEXT: [[DIV27:%.*]] = sdiv i32 [[SUB26]], 1
+// CHECK14-NEXT: [[SUB28:%.*]] = sub nsw i32 [[DIV27]], 1
+// CHECK14-NEXT: store i32 [[SUB28]], i32* [[DOTCAPTURE_EXPR_25]], align 4
+// CHECK14-NEXT: [[TMP84:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_25]], align 4
+// CHECK14-NEXT: [[ADD29:%.*]] = add nsw i32 [[TMP84]], 1
+// CHECK14-NEXT: [[TMP85:%.*]] = zext i32 [[ADD29]] to i64
+// CHECK14-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP85]])
+// CHECK14-NEXT: [[TMP86:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l158.region_id, i32 4, i8** [[TMP80]], i8** [[TMP81]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
+// CHECK14-NEXT: [[TMP87:%.*]] = icmp ne i32 [[TMP86]], 0
+// CHECK14-NEXT: br i1 [[TMP87]], label [[OMP_OFFLOAD_FAILED30:%.*]], label [[OMP_OFFLOAD_CONT31:%.*]]
+// CHECK14: omp_offload.failed30:
+// CHECK14-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l158(i64 [[TMP56]], i64 [[TMP58]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]]
+// CHECK14-NEXT: br label [[OMP_OFFLOAD_CONT31]]
+// CHECK14: omp_offload.cont31:
+// CHECK14-NEXT: [[TMP88:%.*]] = load i32, i32* [[N]], align 4
+// CHECK14-NEXT: [[CONV33:%.*]] = bitcast i64* [[N_CASTED32]] to i32*
+// CHECK14-NEXT: store i32 [[TMP88]], i32* [[CONV33]], align 4
+// CHECK14-NEXT: [[TMP89:%.*]] = load i64, i64* [[N_CASTED32]], align 8
+// CHECK14-NEXT: [[TMP90:%.*]] = mul nuw i64 [[TMP1]], 4
+// CHECK14-NEXT: [[TMP91:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 0
+// CHECK14-NEXT: [[TMP92:%.*]] = bitcast i8** [[TMP91]] to i64*
+// CHECK14-NEXT: store i64 [[TMP89]], i64* [[TMP92]], align 8
+// CHECK14-NEXT: [[TMP93:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS35]], i32 0, i32 0
+// CHECK14-NEXT: [[TMP94:%.*]] = bitcast i8** [[TMP93]] to i64*
+// CHECK14-NEXT: store i64 [[TMP89]], i64* [[TMP94]], align 8
+// CHECK14-NEXT: [[TMP95:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS36]], i64 0, i64 0
+// CHECK14-NEXT: store i8* null, i8** [[TMP95]], align 8
+// CHECK14-NEXT: [[TMP96:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 1
+// CHECK14-NEXT: [[TMP97:%.*]] = bitcast i8** [[TMP96]] to i64*
+// CHECK14-NEXT: store i64 [[TMP1]], i64* [[TMP97]], align 8
+// CHECK14-NEXT: [[TMP98:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS35]], i32 0, i32 1
+// CHECK14-NEXT: [[TMP99:%.*]] = bitcast i8** [[TMP98]] to i64*
+// CHECK14-NEXT: store i64 [[TMP1]], i64* [[TMP99]], align 8
+//
CHECK14-NEXT: [[TMP100:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS36]], i64 0, i64 1 +// CHECK14-NEXT: store i8* null, i8** [[TMP100]], align 8 +// CHECK14-NEXT: [[TMP101:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 2 +// CHECK14-NEXT: [[TMP102:%.*]] = bitcast i8** [[TMP101]] to i32** +// CHECK14-NEXT: store i32* [[VLA]], i32** [[TMP102]], align 8 +// CHECK14-NEXT: [[TMP103:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS35]], i32 0, i32 2 +// CHECK14-NEXT: [[TMP104:%.*]] = bitcast i8** [[TMP103]] to i32** +// CHECK14-NEXT: store i32* [[VLA]], i32** [[TMP104]], align 8 +// CHECK14-NEXT: store i64 [[TMP90]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.12, i32 0, i32 2), align 8 +// CHECK14-NEXT: [[TMP105:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS36]], i64 0, i64 2 +// CHECK14-NEXT: store i8* null, i8** [[TMP105]], align 8 +// CHECK14-NEXT: [[TMP106:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 0 +// CHECK14-NEXT: [[TMP107:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS35]], i32 0, i32 0 +// CHECK14-NEXT: [[TMP108:%.*]] = load i32, i32* [[N]], align 4 +// CHECK14-NEXT: store i32 [[TMP108]], i32* [[DOTCAPTURE_EXPR_38]], align 4 +// CHECK14-NEXT: [[TMP109:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_38]], align 4 +// CHECK14-NEXT: [[SUB40:%.*]] = sub nsw i32 [[TMP109]], 0 +// CHECK14-NEXT: [[DIV41:%.*]] = sdiv i32 [[SUB40]], 1 +// CHECK14-NEXT: [[SUB42:%.*]] = sub nsw i32 [[DIV41]], 1 +// CHECK14-NEXT: store i32 [[SUB42]], i32* [[DOTCAPTURE_EXPR_39]], align 4 +// CHECK14-NEXT: [[TMP110:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_39]], align 4 +// CHECK14-NEXT: [[ADD43:%.*]] = add nsw i32 [[TMP110]], 1 +// CHECK14-NEXT: [[TMP111:%.*]] = zext i32 [[ADD43]] to i64 +// CHECK14-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP111]]) +// CHECK14-NEXT: [[TMP112:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l163.region_id, i32 3, i8** [[TMP106]], i8** [[TMP107]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK14-NEXT: [[TMP113:%.*]] = icmp ne i32 [[TMP112]], 0 +// CHECK14-NEXT: br i1 [[TMP113]], label [[OMP_OFFLOAD_FAILED44:%.*]], label [[OMP_OFFLOAD_CONT45:%.*]] +// CHECK14: omp_offload.failed44: +// CHECK14-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l163(i64 [[TMP89]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] +// CHECK14-NEXT: br label [[OMP_OFFLOAD_CONT45]] +// CHECK14: omp_offload.cont45: +// CHECK14-NEXT: [[TMP114:%.*]] = load i32, i32* [[M]], align 4 +// CHECK14-NEXT: [[CONV47:%.*]] = bitcast i64* [[M_CASTED46]] to i32* +// CHECK14-NEXT: store i32 [[TMP114]], i32* [[CONV47]], align 4 +// CHECK14-NEXT: [[TMP115:%.*]] = load i64, i64* [[M_CASTED46]], align 8 +// CHECK14-NEXT: [[TMP116:%.*]] = load i32, i32* [[N]], align 4 +// CHECK14-NEXT: [[CONV49:%.*]] = bitcast i64* [[N_CASTED48]] to i32* +// CHECK14-NEXT: store i32 [[TMP116]], i32* [[CONV49]], align 4 +// CHECK14-NEXT: [[TMP117:%.*]] = load i64, i64* [[N_CASTED48]], align 8 +// CHECK14-NEXT: [[TMP118:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK14-NEXT: [[TMP119:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* 
[[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 0 +// CHECK14-NEXT: [[TMP120:%.*]] = bitcast i8** [[TMP119]] to i64* +// CHECK14-NEXT: store i64 [[TMP115]], i64* [[TMP120]], align 8 +// CHECK14-NEXT: [[TMP121:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 0 +// CHECK14-NEXT: [[TMP122:%.*]] = bitcast i8** [[TMP121]] to i64* +// CHECK14-NEXT: store i64 [[TMP115]], i64* [[TMP122]], align 8 +// CHECK14-NEXT: [[TMP123:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 0 +// CHECK14-NEXT: store i8* null, i8** [[TMP123]], align 8 +// CHECK14-NEXT: [[TMP124:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 1 +// CHECK14-NEXT: [[TMP125:%.*]] = bitcast i8** [[TMP124]] to i64* +// CHECK14-NEXT: store i64 [[TMP117]], i64* [[TMP125]], align 8 +// CHECK14-NEXT: [[TMP126:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 1 +// CHECK14-NEXT: [[TMP127:%.*]] = bitcast i8** [[TMP126]] to i64* +// CHECK14-NEXT: store i64 [[TMP117]], i64* [[TMP127]], align 8 +// CHECK14-NEXT: [[TMP128:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 1 +// CHECK14-NEXT: store i8* null, i8** [[TMP128]], align 8 +// CHECK14-NEXT: [[TMP129:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 2 +// CHECK14-NEXT: [[TMP130:%.*]] = bitcast i8** [[TMP129]] to i64* +// CHECK14-NEXT: store i64 [[TMP1]], i64* [[TMP130]], align 8 +// CHECK14-NEXT: [[TMP131:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 2 +// CHECK14-NEXT: [[TMP132:%.*]] = bitcast i8** [[TMP131]] to i64* +// CHECK14-NEXT: store i64 [[TMP1]], i64* [[TMP132]], align 8 +// CHECK14-NEXT: [[TMP133:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 2 +// CHECK14-NEXT: store i8* null, i8** [[TMP133]], align 8 +// CHECK14-NEXT: [[TMP134:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 3 +// CHECK14-NEXT: [[TMP135:%.*]] = bitcast i8** [[TMP134]] to i32** +// CHECK14-NEXT: store i32* [[VLA]], i32** [[TMP135]], align 8 +// CHECK14-NEXT: [[TMP136:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 3 +// CHECK14-NEXT: [[TMP137:%.*]] = bitcast i8** [[TMP136]] to i32** +// CHECK14-NEXT: store i32* [[VLA]], i32** [[TMP137]], align 8 +// CHECK14-NEXT: store i64 [[TMP118]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.16, i32 0, i32 3), align 8 +// CHECK14-NEXT: [[TMP138:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 3 +// CHECK14-NEXT: store i8* null, i8** [[TMP138]], align 8 +// CHECK14-NEXT: [[TMP139:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 0 +// CHECK14-NEXT: [[TMP140:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 0 +// CHECK14-NEXT: [[TMP141:%.*]] = load i32, i32* [[N]], align 4 +// CHECK14-NEXT: store i32 [[TMP141]], i32* [[DOTCAPTURE_EXPR_54]], align 4 +// CHECK14-NEXT: [[TMP142:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_54]], align 4 +// CHECK14-NEXT: [[SUB56:%.*]] = sub nsw i32 [[TMP142]], 0 +// CHECK14-NEXT: [[DIV57:%.*]] = sdiv i32 [[SUB56]], 1 +// CHECK14-NEXT: [[SUB58:%.*]] = sub nsw i32 [[DIV57]], 1 +// CHECK14-NEXT: store i32 [[SUB58]], i32* [[DOTCAPTURE_EXPR_55]], align 4 +// CHECK14-NEXT: [[TMP143:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_55]], align 4 
+// CHECK14-NEXT: [[ADD59:%.*]] = add nsw i32 [[TMP143]], 1 +// CHECK14-NEXT: [[TMP144:%.*]] = zext i32 [[ADD59]] to i64 +// CHECK14-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP144]]) +// CHECK14-NEXT: [[TMP145:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l168.region_id, i32 4, i8** [[TMP139]], i8** [[TMP140]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.16, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK14-NEXT: [[TMP146:%.*]] = icmp ne i32 [[TMP145]], 0 +// CHECK14-NEXT: br i1 [[TMP146]], label [[OMP_OFFLOAD_FAILED60:%.*]], label [[OMP_OFFLOAD_CONT61:%.*]] +// CHECK14: omp_offload.failed60: +// CHECK14-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l168(i64 [[TMP115]], i64 [[TMP117]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] +// CHECK14-NEXT: br label [[OMP_OFFLOAD_CONT61]] +// CHECK14: omp_offload.cont61: +// CHECK14-NEXT: [[TMP147:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 +// CHECK14-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP147]]) // CHECK14-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK14-NEXT: [[TMP170:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK14-NEXT: call void @llvm.stackrestore(i8* [[TMP170]]) -// CHECK14-NEXT: [[TMP171:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK14-NEXT: ret i32 [[TMP171]] +// CHECK14-NEXT: [[TMP148:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK14-NEXT: call void @llvm.stackrestore(i8* [[TMP148]]) +// CHECK14-NEXT: [[TMP149:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK14-NEXT: ret i32 [[TMP149]] // // // CHECK14-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l148 @@ -10708,11 +10630,11 @@ // CHECK14-NEXT: [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK14-NEXT: store i32 [[TMP3]], i32* [[CONV2]], align 4 // CHECK14-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i64, i32*, i64)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i32* [[CONV1]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP4]]) +// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i64, i32*, i64)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32* [[CONV1]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP4]]) // CHECK14-NEXT: ret void // // -// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..5 +// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..6 // CHECK14-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK14-NEXT: entry: // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -10794,7 +10716,7 @@ // CHECK14-NEXT: [[CONV7:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK14-NEXT: store i32 [[TMP21]], i32* [[CONV7]], align 4 // CHECK14-NEXT: [[TMP22:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64, i32*, i64)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], i64 [[TMP1]], i32* [[TMP2]], i64 [[TMP22]]) +// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64, i32*, i64)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], i64 [[TMP1]], i32* [[TMP2]], i64 [[TMP22]]) // CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK14: omp.inner.for.inc: // CHECK14-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -10836,7 +10758,7 @@ // CHECK14-NEXT: ret void // // -// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..6 +// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..7 // CHECK14-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK14-NEXT: entry: // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -10956,11 +10878,11 @@ // CHECK14-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32* // CHECK14-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8 // CHECK14-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8 -// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i64, i32*)* @.omp_outlined..8 to void (i32*, i32*, ...)*), i32* [[CONV]], i64 [[TMP0]], i32* [[TMP1]]) +// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i64, i32*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), i32* [[CONV]], i64 [[TMP0]], i32* [[TMP1]]) // CHECK14-NEXT: ret void // // -// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..8 +// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..10 // CHECK14-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] { // CHECK14-NEXT: entry: // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -11032,7 +10954,7 @@ // CHECK14-NEXT: [[TMP17:%.*]] = zext i32 [[TMP16]] to i64 // CHECK14-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 // CHECK14-NEXT: [[TMP19:%.*]] = zext i32 [[TMP18]] to i64 -// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64, i32*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), i64 [[TMP17]], i64 [[TMP19]], i32* [[TMP0]], i64 [[TMP1]], i32* [[TMP2]]) +// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64, i32*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP17]], i64 [[TMP19]], i32* [[TMP0]], i64 [[TMP1]], i32* [[TMP2]]) // CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK14: omp.inner.for.inc: // CHECK14-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -11051,7 +10973,7 @@ // CHECK14-NEXT: ret void // // -// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..9 +// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..11 // CHECK14-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] { // CHECK14-NEXT: entry: // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -11175,11 +11097,11 @@ // CHECK14-NEXT: [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK14-NEXT: store i32 [[TMP3]], i32* [[CONV2]], align 4 // CHECK14-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i64, i32*, i64)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32* [[CONV1]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP4]]) +// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i64, i32*, i64)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i32* [[CONV1]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP4]]) // CHECK14-NEXT: ret void // // -// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..14 // CHECK14-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK14-NEXT: entry: // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -11259,7 +11181,7 @@ // CHECK14-NEXT: [[CONV7:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK14-NEXT: store i32 [[TMP20]], i32* [[CONV7]], align 4 // CHECK14-NEXT: [[TMP21:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64, i32*, i64)* @.omp_outlined..12 to void (i32*, i32*, ...)*), i64 [[TMP17]], i64 [[TMP19]], i32* [[TMP0]], i64 [[TMP1]], i32* [[TMP2]], i64 [[TMP21]]) +// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64, i32*, i64)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i64 [[TMP17]], i64 [[TMP19]], i32* [[TMP0]], i64 [[TMP1]], i32* [[TMP2]], i64 [[TMP21]]) // CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK14: omp.inner.for.inc: // CHECK14-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -11278,7 +11200,7 @@ // CHECK14-NEXT: ret void // // -// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..12 +// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..15 // CHECK14-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK14-NEXT: entry: // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -11424,7 +11346,7 @@ // CHECK14-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK14-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK14-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK14-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK14-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* 
getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.20, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.21, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK14-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK14-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK14: omp_offload.failed: @@ -11442,7 +11364,7 @@ // CHECK14-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0 // CHECK14-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0 // CHECK14-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK14-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l121.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.19, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.20, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK14-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l121.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.24, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.25, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK14-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 // CHECK14-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]] // CHECK14: omp_offload.failed5: @@ -11472,7 +11394,7 @@ // CHECK14-NEXT: [[TMP30:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0 // CHECK14-NEXT: [[TMP31:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0 // CHECK14-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK14-NEXT: [[TMP32:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l126.region_id, i32 2, i8** [[TMP30]], i8** [[TMP31]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.23, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.24, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK14-NEXT: [[TMP32:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l126.region_id, i32 2, i8** [[TMP30]], i8** [[TMP31]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.28, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.29, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK14-NEXT: [[TMP33:%.*]] = icmp ne i32 [[TMP32]], 0 // CHECK14-NEXT: br i1 [[TMP33]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]] // CHECK14: omp_offload.failed11: @@ -11490,7 +11412,7 @@ // CHECK14-NEXT: [[TMP39:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 0 // CHECK14-NEXT: [[TMP40:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 0 // CHECK14-NEXT: call void 
@__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK14-NEXT: [[TMP41:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l131.region_id, i32 1, i8** [[TMP39]], i8** [[TMP40]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.27, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.28, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK14-NEXT: [[TMP41:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l131.region_id, i32 1, i8** [[TMP39]], i8** [[TMP40]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.32, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.33, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK14-NEXT: [[TMP42:%.*]] = icmp ne i32 [[TMP41]], 0 // CHECK14-NEXT: br i1 [[TMP42]], label [[OMP_OFFLOAD_FAILED17:%.*]], label [[OMP_OFFLOAD_CONT18:%.*]] // CHECK14: omp_offload.failed17: @@ -11520,7 +11442,7 @@ // CHECK14-NEXT: [[TMP55:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0 // CHECK14-NEXT: [[TMP56:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0 // CHECK14-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK14-NEXT: [[TMP57:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l136.region_id, i32 2, i8** [[TMP55]], i8** [[TMP56]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.31, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.32, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK14-NEXT: [[TMP57:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l136.region_id, i32 2, i8** [[TMP55]], i8** [[TMP56]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.36, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.37, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK14-NEXT: [[TMP58:%.*]] = icmp ne i32 [[TMP57]], 0 // CHECK14-NEXT: br i1 [[TMP58]], label [[OMP_OFFLOAD_FAILED25:%.*]], label [[OMP_OFFLOAD_CONT26:%.*]] // CHECK14: omp_offload.failed25: @@ -11536,11 +11458,11 @@ // CHECK14-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK14-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 // CHECK14-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 -// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK14-NEXT: ret void // // -// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..14 +// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..18 // CHECK14-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK14-NEXT: entry: // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -11588,7 +11510,7 @@ // CHECK14-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 // CHECK14-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 // CHECK14-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]) +// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..19 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]) // CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK14: omp.inner.for.inc: // CHECK14-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -11603,7 +11525,7 @@ // CHECK14-NEXT: ret void // // -// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..15 +// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..19 // CHECK14-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK14-NEXT: entry: // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -11686,11 +11608,11 @@ // CHECK14-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK14-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 // CHECK14-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 -// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..17 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..22 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK14-NEXT: ret void // // -// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..17 +// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..22 // CHECK14-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK14-NEXT: entry: // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -11738,7 +11660,7 @@ // CHECK14-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 // CHECK14-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 // CHECK14-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]) +// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..23 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]) // CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK14: omp.inner.for.inc: // CHECK14-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -11753,7 +11675,7 @@ // CHECK14-NEXT: ret void // // -// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..18 +// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..23 // CHECK14-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK14-NEXT: entry: // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -11847,11 +11769,11 @@ // CHECK14-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK14-NEXT: store i32 [[TMP2]], i32* [[CONV1]], align 4 // CHECK14-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..21 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP3]]) +// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..26 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP3]]) // CHECK14-NEXT: ret void // // -// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..21 +// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..26 // CHECK14-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK14-NEXT: entry: // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -11907,7 +11829,7 @@ // CHECK14-NEXT: [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK14-NEXT: store i32 [[TMP12]], i32* [[CONV2]], align 4 // CHECK14-NEXT: [[TMP13:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..22 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]]) +// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..27 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]]) // CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK14: omp.inner.for.inc: // CHECK14-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -11922,7 +11844,7 @@ // CHECK14-NEXT: ret void // // -// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..22 +// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..27 // CHECK14-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK14-NEXT: entry: // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -12030,11 +11952,11 @@ // CHECK14-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK14-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 // CHECK14-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 -// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..25 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..30 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK14-NEXT: ret void // // -// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..25 +// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..30 // CHECK14-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK14-NEXT: entry: // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -12082,7 +12004,7 @@ // CHECK14-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 // CHECK14-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 // CHECK14-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..26 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]) +// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..31 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]) // CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK14: omp.inner.for.inc: // CHECK14-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -12097,7 +12019,7 @@ // CHECK14-NEXT: ret void // // -// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..26 +// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..31 // CHECK14-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK14-NEXT: entry: // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -12189,11 +12111,11 @@ // CHECK14-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK14-NEXT: store i32 [[TMP2]], i32* [[CONV1]], align 4 // CHECK14-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..29 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP3]]) +// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..34 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP3]]) // CHECK14-NEXT: ret void // // -// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..29 +// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..34 // CHECK14-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK14-NEXT: entry: // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -12249,7 +12171,7 @@ // CHECK14-NEXT: [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK14-NEXT: store i32 [[TMP12]], i32* [[CONV2]], align 4 // CHECK14-NEXT: [[TMP13:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..30 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]]) +// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..35 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]]) // CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK14: omp.inner.for.inc: // CHECK14-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -12264,7 +12186,7 @@ // CHECK14-NEXT: ret void // // -// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..30 +// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..35 // CHECK14-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK14-NEXT: entry: // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -12364,7 +12286,6 @@ // CHECK15-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK15-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK15-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4 -// CHECK15-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 4 // CHECK15-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK15-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK15-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -12372,36 +12293,32 @@ // CHECK15-NEXT: [[DOTOFFLOAD_BASEPTRS4:%.*]] = alloca [3 x i8*], align 4 // CHECK15-NEXT: [[DOTOFFLOAD_PTRS5:%.*]] = alloca [3 x i8*], align 4 // CHECK15-NEXT: [[DOTOFFLOAD_MAPPERS6:%.*]] = alloca [3 x i8*], align 4 -// CHECK15-NEXT: [[DOTOFFLOAD_SIZES7:%.*]] = alloca [3 x i64], align 4 -// CHECK15-NEXT: [[_TMP8:%.*]] = alloca i32, align 4 +// CHECK15-NEXT: [[_TMP7:%.*]] = alloca i32, align 4 +// CHECK15-NEXT: [[DOTCAPTURE_EXPR_8:%.*]] = alloca i32, align 4 // CHECK15-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4 -// CHECK15-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4 // CHECK15-NEXT: [[M_CASTED:%.*]] = alloca i32, align 4 -// CHECK15-NEXT: [[N_CASTED17:%.*]] = alloca i32, align 4 -// CHECK15-NEXT: 
[[DOTOFFLOAD_BASEPTRS18:%.*]] = alloca [4 x i8*], align 4 -// CHECK15-NEXT: [[DOTOFFLOAD_PTRS19:%.*]] = alloca [4 x i8*], align 4 -// CHECK15-NEXT: [[DOTOFFLOAD_MAPPERS20:%.*]] = alloca [4 x i8*], align 4 -// CHECK15-NEXT: [[DOTOFFLOAD_SIZES21:%.*]] = alloca [4 x i64], align 4 -// CHECK15-NEXT: [[_TMP22:%.*]] = alloca i32, align 4 -// CHECK15-NEXT: [[DOTCAPTURE_EXPR_23:%.*]] = alloca i32, align 4 -// CHECK15-NEXT: [[DOTCAPTURE_EXPR_24:%.*]] = alloca i32, align 4 -// CHECK15-NEXT: [[N_CASTED31:%.*]] = alloca i32, align 4 -// CHECK15-NEXT: [[DOTOFFLOAD_BASEPTRS32:%.*]] = alloca [3 x i8*], align 4 -// CHECK15-NEXT: [[DOTOFFLOAD_PTRS33:%.*]] = alloca [3 x i8*], align 4 -// CHECK15-NEXT: [[DOTOFFLOAD_MAPPERS34:%.*]] = alloca [3 x i8*], align 4 -// CHECK15-NEXT: [[DOTOFFLOAD_SIZES35:%.*]] = alloca [3 x i64], align 4 -// CHECK15-NEXT: [[_TMP36:%.*]] = alloca i32, align 4 -// CHECK15-NEXT: [[DOTCAPTURE_EXPR_37:%.*]] = alloca i32, align 4 -// CHECK15-NEXT: [[DOTCAPTURE_EXPR_38:%.*]] = alloca i32, align 4 -// CHECK15-NEXT: [[M_CASTED45:%.*]] = alloca i32, align 4 -// CHECK15-NEXT: [[N_CASTED46:%.*]] = alloca i32, align 4 -// CHECK15-NEXT: [[DOTOFFLOAD_BASEPTRS47:%.*]] = alloca [4 x i8*], align 4 -// CHECK15-NEXT: [[DOTOFFLOAD_PTRS48:%.*]] = alloca [4 x i8*], align 4 -// CHECK15-NEXT: [[DOTOFFLOAD_MAPPERS49:%.*]] = alloca [4 x i8*], align 4 -// CHECK15-NEXT: [[DOTOFFLOAD_SIZES50:%.*]] = alloca [4 x i64], align 4 -// CHECK15-NEXT: [[_TMP51:%.*]] = alloca i32, align 4 -// CHECK15-NEXT: [[DOTCAPTURE_EXPR_52:%.*]] = alloca i32, align 4 -// CHECK15-NEXT: [[DOTCAPTURE_EXPR_53:%.*]] = alloca i32, align 4 +// CHECK15-NEXT: [[N_CASTED16:%.*]] = alloca i32, align 4 +// CHECK15-NEXT: [[DOTOFFLOAD_BASEPTRS17:%.*]] = alloca [4 x i8*], align 4 +// CHECK15-NEXT: [[DOTOFFLOAD_PTRS18:%.*]] = alloca [4 x i8*], align 4 +// CHECK15-NEXT: [[DOTOFFLOAD_MAPPERS19:%.*]] = alloca [4 x i8*], align 4 +// CHECK15-NEXT: [[_TMP20:%.*]] = alloca i32, align 4 +// CHECK15-NEXT: [[DOTCAPTURE_EXPR_21:%.*]] = alloca i32, align 4 +// CHECK15-NEXT: [[DOTCAPTURE_EXPR_22:%.*]] = alloca i32, align 4 +// CHECK15-NEXT: [[N_CASTED29:%.*]] = alloca i32, align 4 +// CHECK15-NEXT: [[DOTOFFLOAD_BASEPTRS30:%.*]] = alloca [3 x i8*], align 4 +// CHECK15-NEXT: [[DOTOFFLOAD_PTRS31:%.*]] = alloca [3 x i8*], align 4 +// CHECK15-NEXT: [[DOTOFFLOAD_MAPPERS32:%.*]] = alloca [3 x i8*], align 4 +// CHECK15-NEXT: [[_TMP33:%.*]] = alloca i32, align 4 +// CHECK15-NEXT: [[DOTCAPTURE_EXPR_34:%.*]] = alloca i32, align 4 +// CHECK15-NEXT: [[DOTCAPTURE_EXPR_35:%.*]] = alloca i32, align 4 +// CHECK15-NEXT: [[M_CASTED42:%.*]] = alloca i32, align 4 +// CHECK15-NEXT: [[N_CASTED43:%.*]] = alloca i32, align 4 +// CHECK15-NEXT: [[DOTOFFLOAD_BASEPTRS44:%.*]] = alloca [4 x i8*], align 4 +// CHECK15-NEXT: [[DOTOFFLOAD_PTRS45:%.*]] = alloca [4 x i8*], align 4 +// CHECK15-NEXT: [[DOTOFFLOAD_MAPPERS46:%.*]] = alloca [4 x i8*], align 4 +// CHECK15-NEXT: [[_TMP47:%.*]] = alloca i32, align 4 +// CHECK15-NEXT: [[DOTCAPTURE_EXPR_48:%.*]] = alloca i32, align 4 +// CHECK15-NEXT: [[DOTCAPTURE_EXPR_49:%.*]] = alloca i32, align 4 // CHECK15-NEXT: store i32 0, i32* [[RETVAL]], align 4 // CHECK15-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 // CHECK15-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 4 @@ -12423,308 +12340,274 @@ // CHECK15-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK15-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i32* // CHECK15-NEXT: store i32 [[TMP3]], i32* [[TMP9]], align 4 -// CHECK15-NEXT: 
[[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK15-NEXT: store i64 4, i64* [[TMP10]], align 4 -// CHECK15-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK15-NEXT: store i8* null, i8** [[TMP11]], align 4 -// CHECK15-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK15-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32* -// CHECK15-NEXT: store i32 [[TMP0]], i32* [[TMP13]], align 4 -// CHECK15-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK15-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32* -// CHECK15-NEXT: store i32 [[TMP0]], i32* [[TMP15]], align 4 -// CHECK15-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK15-NEXT: store i64 4, i64* [[TMP16]], align 4 -// CHECK15-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK15-NEXT: store i8* null, i8** [[TMP17]], align 4 -// CHECK15-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK15-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK15-NEXT: store i8* null, i8** [[TMP10]], align 4 +// CHECK15-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK15-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i32* +// CHECK15-NEXT: store i32 [[TMP0]], i32* [[TMP12]], align 4 +// CHECK15-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK15-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i32* +// CHECK15-NEXT: store i32 [[TMP0]], i32* [[TMP14]], align 4 +// CHECK15-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK15-NEXT: store i8* null, i8** [[TMP15]], align 4 +// CHECK15-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK15-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** +// CHECK15-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 4 +// CHECK15-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK15-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** // CHECK15-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 4 -// CHECK15-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK15-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** -// CHECK15-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 4 -// CHECK15-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK15-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 4 -// CHECK15-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK15-NEXT: store i8* null, i8** [[TMP23]], align 4 -// CHECK15-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP27:%.*]] 
= load i32, i32* [[N]], align 4 -// CHECK15-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK15-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK15-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 +// CHECK15-NEXT: store i64 [[TMP5]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 2), align 4 +// CHECK15-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK15-NEXT: store i8* null, i8** [[TMP20]], align 4 +// CHECK15-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK15-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK15-NEXT: [[TMP23:%.*]] = load i32, i32* [[N]], align 4 +// CHECK15-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK15-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK15-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK15-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK15-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK15-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK15-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK15-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1 -// CHECK15-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 -// CHECK15-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP30]]) -// CHECK15-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l148.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK15-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 -// CHECK15-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK15-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK15-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], 1 +// CHECK15-NEXT: [[TMP26:%.*]] = zext i32 [[ADD]] to i64 +// CHECK15-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP26]]) +// CHECK15-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l148.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK15-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK15-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK15: omp_offload.failed: // CHECK15-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l148(i32 [[TMP3]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK15-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK15: omp_offload.cont: -// CHECK15-NEXT: [[TMP33:%.*]] = load i32, i32* [[N]], align 4 -// CHECK15-NEXT: store i32 [[TMP33]], i32* [[N_CASTED3]], align 4 -// CHECK15-NEXT: [[TMP34:%.*]] = load i32, i32* [[N_CASTED3]], align 4 -// CHECK15-NEXT: [[TMP35:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK15-NEXT: [[TMP36:%.*]] = sext i32 [[TMP35]] to i64 -// CHECK15-NEXT: 
[[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i32* -// CHECK15-NEXT: store i32 [[TMP34]], i32* [[TMP38]], align 4 -// CHECK15-NEXT: [[TMP39:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i32* -// CHECK15-NEXT: store i32 [[TMP34]], i32* [[TMP40]], align 4 -// CHECK15-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 0 -// CHECK15-NEXT: store i64 4, i64* [[TMP41]], align 4 -// CHECK15-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0 +// CHECK15-NEXT: [[TMP29:%.*]] = load i32, i32* [[N]], align 4 +// CHECK15-NEXT: store i32 [[TMP29]], i32* [[N_CASTED3]], align 4 +// CHECK15-NEXT: [[TMP30:%.*]] = load i32, i32* [[N_CASTED3]], align 4 +// CHECK15-NEXT: [[TMP31:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK15-NEXT: [[TMP32:%.*]] = sext i32 [[TMP31]] to i64 +// CHECK15-NEXT: [[TMP33:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 +// CHECK15-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i32* +// CHECK15-NEXT: store i32 [[TMP30]], i32* [[TMP34]], align 4 +// CHECK15-NEXT: [[TMP35:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 +// CHECK15-NEXT: [[TMP36:%.*]] = bitcast i8** [[TMP35]] to i32* +// CHECK15-NEXT: store i32 [[TMP30]], i32* [[TMP36]], align 4 +// CHECK15-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0 +// CHECK15-NEXT: store i8* null, i8** [[TMP37]], align 4 +// CHECK15-NEXT: [[TMP38:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1 +// CHECK15-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i32* +// CHECK15-NEXT: store i32 [[TMP0]], i32* [[TMP39]], align 4 +// CHECK15-NEXT: [[TMP40:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 1 +// CHECK15-NEXT: [[TMP41:%.*]] = bitcast i8** [[TMP40]] to i32* +// CHECK15-NEXT: store i32 [[TMP0]], i32* [[TMP41]], align 4 +// CHECK15-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1 // CHECK15-NEXT: store i8* null, i8** [[TMP42]], align 4 -// CHECK15-NEXT: [[TMP43:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1 -// CHECK15-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32* -// CHECK15-NEXT: store i32 [[TMP0]], i32* [[TMP44]], align 4 -// CHECK15-NEXT: [[TMP45:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 1 -// CHECK15-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32* -// CHECK15-NEXT: store i32 [[TMP0]], i32* [[TMP46]], align 4 -// CHECK15-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 1 -// CHECK15-NEXT: store i64 4, i64* [[TMP47]], align 4 -// CHECK15-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1 -// CHECK15-NEXT: store i8* null, i8** [[TMP48]], align 4 -// CHECK15-NEXT: [[TMP49:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2 -// CHECK15-NEXT: [[TMP50:%.*]] = bitcast i8** [[TMP49]] to i32** -// CHECK15-NEXT: store i32* [[VLA]], i32** [[TMP50]], align 4 -// CHECK15-NEXT: [[TMP51:%.*]] = getelementptr inbounds [3 x i8*], [3 x 
i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 2 -// CHECK15-NEXT: [[TMP52:%.*]] = bitcast i8** [[TMP51]] to i32** -// CHECK15-NEXT: store i32* [[VLA]], i32** [[TMP52]], align 4 -// CHECK15-NEXT: [[TMP53:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 2 -// CHECK15-NEXT: store i64 [[TMP36]], i64* [[TMP53]], align 4 -// CHECK15-NEXT: [[TMP54:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 2 -// CHECK15-NEXT: store i8* null, i8** [[TMP54]], align 4 -// CHECK15-NEXT: [[TMP55:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP56:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP57:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 0 +// CHECK15-NEXT: [[TMP43:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2 +// CHECK15-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32** +// CHECK15-NEXT: store i32* [[VLA]], i32** [[TMP44]], align 4 +// CHECK15-NEXT: [[TMP45:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 2 +// CHECK15-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32** +// CHECK15-NEXT: store i32* [[VLA]], i32** [[TMP46]], align 4 +// CHECK15-NEXT: store i64 [[TMP32]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 2), align 4 +// CHECK15-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 2 +// CHECK15-NEXT: store i8* null, i8** [[TMP47]], align 4 +// CHECK15-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 +// CHECK15-NEXT: [[TMP49:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 +// CHECK15-NEXT: [[TMP50:%.*]] = load i32, i32* [[N]], align 4 +// CHECK15-NEXT: store i32 [[TMP50]], i32* [[DOTCAPTURE_EXPR_8]], align 4 +// CHECK15-NEXT: [[TMP51:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_8]], align 4 +// CHECK15-NEXT: [[SUB10:%.*]] = sub nsw i32 [[TMP51]], 0 +// CHECK15-NEXT: [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1 +// CHECK15-NEXT: [[SUB12:%.*]] = sub nsw i32 [[DIV11]], 1 +// CHECK15-NEXT: store i32 [[SUB12]], i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK15-NEXT: [[TMP52:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK15-NEXT: [[ADD13:%.*]] = add nsw i32 [[TMP52]], 1 +// CHECK15-NEXT: [[TMP53:%.*]] = zext i32 [[ADD13]] to i64 +// CHECK15-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP53]]) +// CHECK15-NEXT: [[TMP54:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l153.region_id, i32 3, i8** [[TMP48]], i8** [[TMP49]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK15-NEXT: [[TMP55:%.*]] = icmp ne i32 [[TMP54]], 0 +// CHECK15-NEXT: br i1 [[TMP55]], label [[OMP_OFFLOAD_FAILED14:%.*]], label [[OMP_OFFLOAD_CONT15:%.*]] +// CHECK15: omp_offload.failed14: +// CHECK15-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l153(i32 [[TMP30]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] +// CHECK15-NEXT: br label [[OMP_OFFLOAD_CONT15]] +// CHECK15: omp_offload.cont15: +// CHECK15-NEXT: [[TMP56:%.*]] = 
load i32, i32* [[M]], align 4 +// CHECK15-NEXT: store i32 [[TMP56]], i32* [[M_CASTED]], align 4 +// CHECK15-NEXT: [[TMP57:%.*]] = load i32, i32* [[M_CASTED]], align 4 // CHECK15-NEXT: [[TMP58:%.*]] = load i32, i32* [[N]], align 4 -// CHECK15-NEXT: store i32 [[TMP58]], i32* [[DOTCAPTURE_EXPR_9]], align 4 -// CHECK15-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 -// CHECK15-NEXT: [[SUB11:%.*]] = sub nsw i32 [[TMP59]], 0 -// CHECK15-NEXT: [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1 -// CHECK15-NEXT: [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1 -// CHECK15-NEXT: store i32 [[SUB13]], i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK15-NEXT: [[TMP60:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK15-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP60]], 1 -// CHECK15-NEXT: [[TMP61:%.*]] = zext i32 [[ADD14]] to i64 -// CHECK15-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP61]]) -// CHECK15-NEXT: [[TMP62:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l153.region_id, i32 3, i8** [[TMP55]], i8** [[TMP56]], i64* [[TMP57]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK15-NEXT: [[TMP63:%.*]] = icmp ne i32 [[TMP62]], 0 -// CHECK15-NEXT: br i1 [[TMP63]], label [[OMP_OFFLOAD_FAILED15:%.*]], label [[OMP_OFFLOAD_CONT16:%.*]] -// CHECK15: omp_offload.failed15: -// CHECK15-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l153(i32 [[TMP34]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] -// CHECK15-NEXT: br label [[OMP_OFFLOAD_CONT16]] -// CHECK15: omp_offload.cont16: -// CHECK15-NEXT: [[TMP64:%.*]] = load i32, i32* [[M]], align 4 -// CHECK15-NEXT: store i32 [[TMP64]], i32* [[M_CASTED]], align 4 -// CHECK15-NEXT: [[TMP65:%.*]] = load i32, i32* [[M_CASTED]], align 4 -// CHECK15-NEXT: [[TMP66:%.*]] = load i32, i32* [[N]], align 4 -// CHECK15-NEXT: store i32 [[TMP66]], i32* [[N_CASTED17]], align 4 -// CHECK15-NEXT: [[TMP67:%.*]] = load i32, i32* [[N_CASTED17]], align 4 -// CHECK15-NEXT: [[TMP68:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK15-NEXT: [[TMP69:%.*]] = sext i32 [[TMP68]] to i64 -// CHECK15-NEXT: [[TMP70:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP71:%.*]] = bitcast i8** [[TMP70]] to i32* -// CHECK15-NEXT: store i32 [[TMP65]], i32* [[TMP71]], align 4 -// CHECK15-NEXT: [[TMP72:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 0 +// CHECK15-NEXT: store i32 [[TMP58]], i32* [[N_CASTED16]], align 4 +// CHECK15-NEXT: [[TMP59:%.*]] = load i32, i32* [[N_CASTED16]], align 4 +// CHECK15-NEXT: [[TMP60:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK15-NEXT: [[TMP61:%.*]] = sext i32 [[TMP60]] to i64 +// CHECK15-NEXT: [[TMP62:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 0 +// CHECK15-NEXT: [[TMP63:%.*]] = bitcast i8** [[TMP62]] to i32* +// CHECK15-NEXT: store i32 [[TMP57]], i32* [[TMP63]], align 4 +// CHECK15-NEXT: [[TMP64:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 0 +// CHECK15-NEXT: [[TMP65:%.*]] = bitcast i8** [[TMP64]] to i32* +// CHECK15-NEXT: store i32 [[TMP57]], i32* [[TMP65]], align 4 +// CHECK15-NEXT: [[TMP66:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 0 +// CHECK15-NEXT: store i8* null, i8** [[TMP66]], align 4 +// CHECK15-NEXT: 
[[TMP67:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 1 +// CHECK15-NEXT: [[TMP68:%.*]] = bitcast i8** [[TMP67]] to i32* +// CHECK15-NEXT: store i32 [[TMP59]], i32* [[TMP68]], align 4 +// CHECK15-NEXT: [[TMP69:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 1 +// CHECK15-NEXT: [[TMP70:%.*]] = bitcast i8** [[TMP69]] to i32* +// CHECK15-NEXT: store i32 [[TMP59]], i32* [[TMP70]], align 4 +// CHECK15-NEXT: [[TMP71:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 1 +// CHECK15-NEXT: store i8* null, i8** [[TMP71]], align 4 +// CHECK15-NEXT: [[TMP72:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 2 // CHECK15-NEXT: [[TMP73:%.*]] = bitcast i8** [[TMP72]] to i32* -// CHECK15-NEXT: store i32 [[TMP65]], i32* [[TMP73]], align 4 -// CHECK15-NEXT: [[TMP74:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES21]], i32 0, i32 0 -// CHECK15-NEXT: store i64 4, i64* [[TMP74]], align 4 -// CHECK15-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 0 -// CHECK15-NEXT: store i8* null, i8** [[TMP75]], align 4 -// CHECK15-NEXT: [[TMP76:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 1 -// CHECK15-NEXT: [[TMP77:%.*]] = bitcast i8** [[TMP76]] to i32* -// CHECK15-NEXT: store i32 [[TMP67]], i32* [[TMP77]], align 4 -// CHECK15-NEXT: [[TMP78:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 1 -// CHECK15-NEXT: [[TMP79:%.*]] = bitcast i8** [[TMP78]] to i32* -// CHECK15-NEXT: store i32 [[TMP67]], i32* [[TMP79]], align 4 -// CHECK15-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES21]], i32 0, i32 1 -// CHECK15-NEXT: store i64 4, i64* [[TMP80]], align 4 -// CHECK15-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 1 +// CHECK15-NEXT: store i32 [[TMP0]], i32* [[TMP73]], align 4 +// CHECK15-NEXT: [[TMP74:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 2 +// CHECK15-NEXT: [[TMP75:%.*]] = bitcast i8** [[TMP74]] to i32* +// CHECK15-NEXT: store i32 [[TMP0]], i32* [[TMP75]], align 4 +// CHECK15-NEXT: [[TMP76:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 2 +// CHECK15-NEXT: store i8* null, i8** [[TMP76]], align 4 +// CHECK15-NEXT: [[TMP77:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 3 +// CHECK15-NEXT: [[TMP78:%.*]] = bitcast i8** [[TMP77]] to i32** +// CHECK15-NEXT: store i32* [[VLA]], i32** [[TMP78]], align 4 +// CHECK15-NEXT: [[TMP79:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 3 +// CHECK15-NEXT: [[TMP80:%.*]] = bitcast i8** [[TMP79]] to i32** +// CHECK15-NEXT: store i32* [[VLA]], i32** [[TMP80]], align 4 +// CHECK15-NEXT: store i64 [[TMP61]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 3), align 4 +// CHECK15-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 3 // CHECK15-NEXT: store i8* null, i8** [[TMP81]], align 4 -// CHECK15-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 2 -// CHECK15-NEXT: [[TMP83:%.*]] = bitcast i8** [[TMP82]] to i32* -// CHECK15-NEXT: store i32 [[TMP0]], i32* [[TMP83]], align 4 -// 
CHECK15-NEXT: [[TMP84:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 2 -// CHECK15-NEXT: [[TMP85:%.*]] = bitcast i8** [[TMP84]] to i32* -// CHECK15-NEXT: store i32 [[TMP0]], i32* [[TMP85]], align 4 -// CHECK15-NEXT: [[TMP86:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES21]], i32 0, i32 2 -// CHECK15-NEXT: store i64 4, i64* [[TMP86]], align 4 -// CHECK15-NEXT: [[TMP87:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 2 -// CHECK15-NEXT: store i8* null, i8** [[TMP87]], align 4 -// CHECK15-NEXT: [[TMP88:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 3 -// CHECK15-NEXT: [[TMP89:%.*]] = bitcast i8** [[TMP88]] to i32** -// CHECK15-NEXT: store i32* [[VLA]], i32** [[TMP89]], align 4 -// CHECK15-NEXT: [[TMP90:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 3 -// CHECK15-NEXT: [[TMP91:%.*]] = bitcast i8** [[TMP90]] to i32** -// CHECK15-NEXT: store i32* [[VLA]], i32** [[TMP91]], align 4 -// CHECK15-NEXT: [[TMP92:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES21]], i32 0, i32 3 -// CHECK15-NEXT: store i64 [[TMP69]], i64* [[TMP92]], align 4 -// CHECK15-NEXT: [[TMP93:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 3 -// CHECK15-NEXT: store i8* null, i8** [[TMP93]], align 4 -// CHECK15-NEXT: [[TMP94:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP95:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP96:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES21]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP97:%.*]] = load i32, i32* [[N]], align 4 -// CHECK15-NEXT: store i32 [[TMP97]], i32* [[DOTCAPTURE_EXPR_23]], align 4 -// CHECK15-NEXT: [[TMP98:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_23]], align 4 -// CHECK15-NEXT: [[SUB25:%.*]] = sub nsw i32 [[TMP98]], 0 -// CHECK15-NEXT: [[DIV26:%.*]] = sdiv i32 [[SUB25]], 1 -// CHECK15-NEXT: [[SUB27:%.*]] = sub nsw i32 [[DIV26]], 1 -// CHECK15-NEXT: store i32 [[SUB27]], i32* [[DOTCAPTURE_EXPR_24]], align 4 -// CHECK15-NEXT: [[TMP99:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_24]], align 4 -// CHECK15-NEXT: [[ADD28:%.*]] = add nsw i32 [[TMP99]], 1 -// CHECK15-NEXT: [[TMP100:%.*]] = zext i32 [[ADD28]] to i64 -// CHECK15-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP100]]) -// CHECK15-NEXT: [[TMP101:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l158.region_id, i32 4, i8** [[TMP94]], i8** [[TMP95]], i64* [[TMP96]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.7, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK15-NEXT: [[TMP102:%.*]] = icmp ne i32 [[TMP101]], 0 -// CHECK15-NEXT: br i1 [[TMP102]], label [[OMP_OFFLOAD_FAILED29:%.*]], label [[OMP_OFFLOAD_CONT30:%.*]] -// CHECK15: omp_offload.failed29: -// CHECK15-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l158(i32 [[TMP65]], i32 [[TMP67]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] -// CHECK15-NEXT: br label [[OMP_OFFLOAD_CONT30]] -// CHECK15: omp_offload.cont30: -// CHECK15-NEXT: [[TMP103:%.*]] = load i32, i32* [[N]], align 4 -// CHECK15-NEXT: store i32 [[TMP103]], i32* [[N_CASTED31]], align 4 -// CHECK15-NEXT: [[TMP104:%.*]] = load i32, i32* 
[[N_CASTED31]], align 4 -// CHECK15-NEXT: [[TMP105:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK15-NEXT: [[TMP106:%.*]] = sext i32 [[TMP105]] to i64 -// CHECK15-NEXT: [[TMP107:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS32]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP108:%.*]] = bitcast i8** [[TMP107]] to i32* -// CHECK15-NEXT: store i32 [[TMP104]], i32* [[TMP108]], align 4 -// CHECK15-NEXT: [[TMP109:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS33]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP110:%.*]] = bitcast i8** [[TMP109]] to i32* -// CHECK15-NEXT: store i32 [[TMP104]], i32* [[TMP110]], align 4 -// CHECK15-NEXT: [[TMP111:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES35]], i32 0, i32 0 -// CHECK15-NEXT: store i64 4, i64* [[TMP111]], align 4 -// CHECK15-NEXT: [[TMP112:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS34]], i32 0, i32 0 -// CHECK15-NEXT: store i8* null, i8** [[TMP112]], align 4 -// CHECK15-NEXT: [[TMP113:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS32]], i32 0, i32 1 -// CHECK15-NEXT: [[TMP114:%.*]] = bitcast i8** [[TMP113]] to i32* -// CHECK15-NEXT: store i32 [[TMP0]], i32* [[TMP114]], align 4 -// CHECK15-NEXT: [[TMP115:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS33]], i32 0, i32 1 -// CHECK15-NEXT: [[TMP116:%.*]] = bitcast i8** [[TMP115]] to i32* -// CHECK15-NEXT: store i32 [[TMP0]], i32* [[TMP116]], align 4 -// CHECK15-NEXT: [[TMP117:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES35]], i32 0, i32 1 -// CHECK15-NEXT: store i64 4, i64* [[TMP117]], align 4 -// CHECK15-NEXT: [[TMP118:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS34]], i32 0, i32 1 -// CHECK15-NEXT: store i8* null, i8** [[TMP118]], align 4 -// CHECK15-NEXT: [[TMP119:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS32]], i32 0, i32 2 -// CHECK15-NEXT: [[TMP120:%.*]] = bitcast i8** [[TMP119]] to i32** -// CHECK15-NEXT: store i32* [[VLA]], i32** [[TMP120]], align 4 -// CHECK15-NEXT: [[TMP121:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS33]], i32 0, i32 2 -// CHECK15-NEXT: [[TMP122:%.*]] = bitcast i8** [[TMP121]] to i32** -// CHECK15-NEXT: store i32* [[VLA]], i32** [[TMP122]], align 4 -// CHECK15-NEXT: [[TMP123:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES35]], i32 0, i32 2 -// CHECK15-NEXT: store i64 [[TMP106]], i64* [[TMP123]], align 4 -// CHECK15-NEXT: [[TMP124:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS34]], i32 0, i32 2 -// CHECK15-NEXT: store i8* null, i8** [[TMP124]], align 4 -// CHECK15-NEXT: [[TMP125:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS32]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP126:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS33]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP127:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES35]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP128:%.*]] = load i32, i32* [[N]], align 4 -// CHECK15-NEXT: store i32 [[TMP128]], i32* [[DOTCAPTURE_EXPR_37]], align 4 -// CHECK15-NEXT: [[TMP129:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_37]], align 4 -// CHECK15-NEXT: [[SUB39:%.*]] = sub nsw i32 [[TMP129]], 0 -// CHECK15-NEXT: [[DIV40:%.*]] = sdiv i32 [[SUB39]], 1 -// CHECK15-NEXT: [[SUB41:%.*]] = sub nsw i32 [[DIV40]], 1 -// CHECK15-NEXT: store i32 [[SUB41]], i32* [[DOTCAPTURE_EXPR_38]], align 4 -// CHECK15-NEXT: 
[[TMP130:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_38]], align 4 -// CHECK15-NEXT: [[ADD42:%.*]] = add nsw i32 [[TMP130]], 1 -// CHECK15-NEXT: [[TMP131:%.*]] = zext i32 [[ADD42]] to i64 -// CHECK15-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP131]]) -// CHECK15-NEXT: [[TMP132:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l163.region_id, i32 3, i8** [[TMP125]], i8** [[TMP126]], i64* [[TMP127]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK15-NEXT: [[TMP133:%.*]] = icmp ne i32 [[TMP132]], 0 -// CHECK15-NEXT: br i1 [[TMP133]], label [[OMP_OFFLOAD_FAILED43:%.*]], label [[OMP_OFFLOAD_CONT44:%.*]] -// CHECK15: omp_offload.failed43: -// CHECK15-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l163(i32 [[TMP104]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] -// CHECK15-NEXT: br label [[OMP_OFFLOAD_CONT44]] -// CHECK15: omp_offload.cont44: -// CHECK15-NEXT: [[TMP134:%.*]] = load i32, i32* [[M]], align 4 -// CHECK15-NEXT: store i32 [[TMP134]], i32* [[M_CASTED45]], align 4 -// CHECK15-NEXT: [[TMP135:%.*]] = load i32, i32* [[M_CASTED45]], align 4 -// CHECK15-NEXT: [[TMP136:%.*]] = load i32, i32* [[N]], align 4 -// CHECK15-NEXT: store i32 [[TMP136]], i32* [[N_CASTED46]], align 4 -// CHECK15-NEXT: [[TMP137:%.*]] = load i32, i32* [[N_CASTED46]], align 4 -// CHECK15-NEXT: [[TMP138:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK15-NEXT: [[TMP139:%.*]] = sext i32 [[TMP138]] to i64 -// CHECK15-NEXT: [[TMP140:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS47]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP141:%.*]] = bitcast i8** [[TMP140]] to i32* -// CHECK15-NEXT: store i32 [[TMP135]], i32* [[TMP141]], align 4 -// CHECK15-NEXT: [[TMP142:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS48]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP143:%.*]] = bitcast i8** [[TMP142]] to i32* -// CHECK15-NEXT: store i32 [[TMP135]], i32* [[TMP143]], align 4 -// CHECK15-NEXT: [[TMP144:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES50]], i32 0, i32 0 -// CHECK15-NEXT: store i64 4, i64* [[TMP144]], align 4 -// CHECK15-NEXT: [[TMP145:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS49]], i32 0, i32 0 -// CHECK15-NEXT: store i8* null, i8** [[TMP145]], align 4 -// CHECK15-NEXT: [[TMP146:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS47]], i32 0, i32 1 -// CHECK15-NEXT: [[TMP147:%.*]] = bitcast i8** [[TMP146]] to i32* -// CHECK15-NEXT: store i32 [[TMP137]], i32* [[TMP147]], align 4 -// CHECK15-NEXT: [[TMP148:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS48]], i32 0, i32 1 -// CHECK15-NEXT: [[TMP149:%.*]] = bitcast i8** [[TMP148]] to i32* -// CHECK15-NEXT: store i32 [[TMP137]], i32* [[TMP149]], align 4 -// CHECK15-NEXT: [[TMP150:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES50]], i32 0, i32 1 -// CHECK15-NEXT: store i64 4, i64* [[TMP150]], align 4 -// CHECK15-NEXT: [[TMP151:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS49]], i32 0, i32 1 -// CHECK15-NEXT: store i8* null, i8** [[TMP151]], align 4 -// CHECK15-NEXT: [[TMP152:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS47]], i32 0, i32 2 -// CHECK15-NEXT: [[TMP153:%.*]] = bitcast i8** [[TMP152]] to i32* -// CHECK15-NEXT: store i32 [[TMP0]], i32* 
[[TMP153]], align 4 -// CHECK15-NEXT: [[TMP154:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS48]], i32 0, i32 2 -// CHECK15-NEXT: [[TMP155:%.*]] = bitcast i8** [[TMP154]] to i32* -// CHECK15-NEXT: store i32 [[TMP0]], i32* [[TMP155]], align 4 -// CHECK15-NEXT: [[TMP156:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES50]], i32 0, i32 2 -// CHECK15-NEXT: store i64 4, i64* [[TMP156]], align 4 -// CHECK15-NEXT: [[TMP157:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS49]], i32 0, i32 2 -// CHECK15-NEXT: store i8* null, i8** [[TMP157]], align 4 -// CHECK15-NEXT: [[TMP158:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS47]], i32 0, i32 3 -// CHECK15-NEXT: [[TMP159:%.*]] = bitcast i8** [[TMP158]] to i32** -// CHECK15-NEXT: store i32* [[VLA]], i32** [[TMP159]], align 4 -// CHECK15-NEXT: [[TMP160:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS48]], i32 0, i32 3 -// CHECK15-NEXT: [[TMP161:%.*]] = bitcast i8** [[TMP160]] to i32** -// CHECK15-NEXT: store i32* [[VLA]], i32** [[TMP161]], align 4 -// CHECK15-NEXT: [[TMP162:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES50]], i32 0, i32 3 -// CHECK15-NEXT: store i64 [[TMP139]], i64* [[TMP162]], align 4 -// CHECK15-NEXT: [[TMP163:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS49]], i32 0, i32 3 -// CHECK15-NEXT: store i8* null, i8** [[TMP163]], align 4 -// CHECK15-NEXT: [[TMP164:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS47]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP165:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS48]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP166:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES50]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP167:%.*]] = load i32, i32* [[N]], align 4 -// CHECK15-NEXT: store i32 [[TMP167]], i32* [[DOTCAPTURE_EXPR_52]], align 4 -// CHECK15-NEXT: [[TMP168:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_52]], align 4 -// CHECK15-NEXT: [[SUB54:%.*]] = sub nsw i32 [[TMP168]], 0 -// CHECK15-NEXT: [[DIV55:%.*]] = sdiv i32 [[SUB54]], 1 -// CHECK15-NEXT: [[SUB56:%.*]] = sub nsw i32 [[DIV55]], 1 -// CHECK15-NEXT: store i32 [[SUB56]], i32* [[DOTCAPTURE_EXPR_53]], align 4 -// CHECK15-NEXT: [[TMP169:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_53]], align 4 -// CHECK15-NEXT: [[ADD57:%.*]] = add nsw i32 [[TMP169]], 1 -// CHECK15-NEXT: [[TMP170:%.*]] = zext i32 [[ADD57]] to i64 -// CHECK15-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP170]]) -// CHECK15-NEXT: [[TMP171:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l168.region_id, i32 4, i8** [[TMP164]], i8** [[TMP165]], i64* [[TMP166]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK15-NEXT: [[TMP172:%.*]] = icmp ne i32 [[TMP171]], 0 -// CHECK15-NEXT: br i1 [[TMP172]], label [[OMP_OFFLOAD_FAILED58:%.*]], label [[OMP_OFFLOAD_CONT59:%.*]] -// CHECK15: omp_offload.failed58: -// CHECK15-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l168(i32 [[TMP135]], i32 [[TMP137]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] -// CHECK15-NEXT: br label [[OMP_OFFLOAD_CONT59]] -// CHECK15: omp_offload.cont59: -// CHECK15-NEXT: [[TMP173:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 -// CHECK15-NEXT: [[CALL:%.*]] = call noundef i32 
@_Z5tmainIiLi10EEiT_(i32 noundef [[TMP173]]) +// CHECK15-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 0 +// CHECK15-NEXT: [[TMP83:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 0 +// CHECK15-NEXT: [[TMP84:%.*]] = load i32, i32* [[N]], align 4 +// CHECK15-NEXT: store i32 [[TMP84]], i32* [[DOTCAPTURE_EXPR_21]], align 4 +// CHECK15-NEXT: [[TMP85:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_21]], align 4 +// CHECK15-NEXT: [[SUB23:%.*]] = sub nsw i32 [[TMP85]], 0 +// CHECK15-NEXT: [[DIV24:%.*]] = sdiv i32 [[SUB23]], 1 +// CHECK15-NEXT: [[SUB25:%.*]] = sub nsw i32 [[DIV24]], 1 +// CHECK15-NEXT: store i32 [[SUB25]], i32* [[DOTCAPTURE_EXPR_22]], align 4 +// CHECK15-NEXT: [[TMP86:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_22]], align 4 +// CHECK15-NEXT: [[ADD26:%.*]] = add nsw i32 [[TMP86]], 1 +// CHECK15-NEXT: [[TMP87:%.*]] = zext i32 [[ADD26]] to i64 +// CHECK15-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP87]]) +// CHECK15-NEXT: [[TMP88:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l158.region_id, i32 4, i8** [[TMP82]], i8** [[TMP83]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK15-NEXT: [[TMP89:%.*]] = icmp ne i32 [[TMP88]], 0 +// CHECK15-NEXT: br i1 [[TMP89]], label [[OMP_OFFLOAD_FAILED27:%.*]], label [[OMP_OFFLOAD_CONT28:%.*]] +// CHECK15: omp_offload.failed27: +// CHECK15-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l158(i32 [[TMP57]], i32 [[TMP59]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] +// CHECK15-NEXT: br label [[OMP_OFFLOAD_CONT28]] +// CHECK15: omp_offload.cont28: +// CHECK15-NEXT: [[TMP90:%.*]] = load i32, i32* [[N]], align 4 +// CHECK15-NEXT: store i32 [[TMP90]], i32* [[N_CASTED29]], align 4 +// CHECK15-NEXT: [[TMP91:%.*]] = load i32, i32* [[N_CASTED29]], align 4 +// CHECK15-NEXT: [[TMP92:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK15-NEXT: [[TMP93:%.*]] = sext i32 [[TMP92]] to i64 +// CHECK15-NEXT: [[TMP94:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS30]], i32 0, i32 0 +// CHECK15-NEXT: [[TMP95:%.*]] = bitcast i8** [[TMP94]] to i32* +// CHECK15-NEXT: store i32 [[TMP91]], i32* [[TMP95]], align 4 +// CHECK15-NEXT: [[TMP96:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS31]], i32 0, i32 0 +// CHECK15-NEXT: [[TMP97:%.*]] = bitcast i8** [[TMP96]] to i32* +// CHECK15-NEXT: store i32 [[TMP91]], i32* [[TMP97]], align 4 +// CHECK15-NEXT: [[TMP98:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS32]], i32 0, i32 0 +// CHECK15-NEXT: store i8* null, i8** [[TMP98]], align 4 +// CHECK15-NEXT: [[TMP99:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS30]], i32 0, i32 1 +// CHECK15-NEXT: [[TMP100:%.*]] = bitcast i8** [[TMP99]] to i32* +// CHECK15-NEXT: store i32 [[TMP0]], i32* [[TMP100]], align 4 +// CHECK15-NEXT: [[TMP101:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS31]], i32 0, i32 1 +// CHECK15-NEXT: [[TMP102:%.*]] = bitcast i8** [[TMP101]] to i32* +// CHECK15-NEXT: store i32 [[TMP0]], i32* [[TMP102]], align 4 +// CHECK15-NEXT: [[TMP103:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS32]], i32 0, i32 1 +// CHECK15-NEXT: store i8* 
null, i8** [[TMP103]], align 4 +// CHECK15-NEXT: [[TMP104:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS30]], i32 0, i32 2 +// CHECK15-NEXT: [[TMP105:%.*]] = bitcast i8** [[TMP104]] to i32** +// CHECK15-NEXT: store i32* [[VLA]], i32** [[TMP105]], align 4 +// CHECK15-NEXT: [[TMP106:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS31]], i32 0, i32 2 +// CHECK15-NEXT: [[TMP107:%.*]] = bitcast i8** [[TMP106]] to i32** +// CHECK15-NEXT: store i32* [[VLA]], i32** [[TMP107]], align 4 +// CHECK15-NEXT: store i64 [[TMP93]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.12, i32 0, i32 2), align 4 +// CHECK15-NEXT: [[TMP108:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS32]], i32 0, i32 2 +// CHECK15-NEXT: store i8* null, i8** [[TMP108]], align 4 +// CHECK15-NEXT: [[TMP109:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS30]], i32 0, i32 0 +// CHECK15-NEXT: [[TMP110:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS31]], i32 0, i32 0 +// CHECK15-NEXT: [[TMP111:%.*]] = load i32, i32* [[N]], align 4 +// CHECK15-NEXT: store i32 [[TMP111]], i32* [[DOTCAPTURE_EXPR_34]], align 4 +// CHECK15-NEXT: [[TMP112:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_34]], align 4 +// CHECK15-NEXT: [[SUB36:%.*]] = sub nsw i32 [[TMP112]], 0 +// CHECK15-NEXT: [[DIV37:%.*]] = sdiv i32 [[SUB36]], 1 +// CHECK15-NEXT: [[SUB38:%.*]] = sub nsw i32 [[DIV37]], 1 +// CHECK15-NEXT: store i32 [[SUB38]], i32* [[DOTCAPTURE_EXPR_35]], align 4 +// CHECK15-NEXT: [[TMP113:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_35]], align 4 +// CHECK15-NEXT: [[ADD39:%.*]] = add nsw i32 [[TMP113]], 1 +// CHECK15-NEXT: [[TMP114:%.*]] = zext i32 [[ADD39]] to i64 +// CHECK15-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP114]]) +// CHECK15-NEXT: [[TMP115:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l163.region_id, i32 3, i8** [[TMP109]], i8** [[TMP110]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK15-NEXT: [[TMP116:%.*]] = icmp ne i32 [[TMP115]], 0 +// CHECK15-NEXT: br i1 [[TMP116]], label [[OMP_OFFLOAD_FAILED40:%.*]], label [[OMP_OFFLOAD_CONT41:%.*]] +// CHECK15: omp_offload.failed40: +// CHECK15-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l163(i32 [[TMP91]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] +// CHECK15-NEXT: br label [[OMP_OFFLOAD_CONT41]] +// CHECK15: omp_offload.cont41: +// CHECK15-NEXT: [[TMP117:%.*]] = load i32, i32* [[M]], align 4 +// CHECK15-NEXT: store i32 [[TMP117]], i32* [[M_CASTED42]], align 4 +// CHECK15-NEXT: [[TMP118:%.*]] = load i32, i32* [[M_CASTED42]], align 4 +// CHECK15-NEXT: [[TMP119:%.*]] = load i32, i32* [[N]], align 4 +// CHECK15-NEXT: store i32 [[TMP119]], i32* [[N_CASTED43]], align 4 +// CHECK15-NEXT: [[TMP120:%.*]] = load i32, i32* [[N_CASTED43]], align 4 +// CHECK15-NEXT: [[TMP121:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK15-NEXT: [[TMP122:%.*]] = sext i32 [[TMP121]] to i64 +// CHECK15-NEXT: [[TMP123:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 0 +// CHECK15-NEXT: [[TMP124:%.*]] = bitcast i8** [[TMP123]] to i32* +// CHECK15-NEXT: store i32 [[TMP118]], i32* [[TMP124]], align 4 +// CHECK15-NEXT: [[TMP125:%.*]] = 
getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 0 +// CHECK15-NEXT: [[TMP126:%.*]] = bitcast i8** [[TMP125]] to i32* +// CHECK15-NEXT: store i32 [[TMP118]], i32* [[TMP126]], align 4 +// CHECK15-NEXT: [[TMP127:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS46]], i32 0, i32 0 +// CHECK15-NEXT: store i8* null, i8** [[TMP127]], align 4 +// CHECK15-NEXT: [[TMP128:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 1 +// CHECK15-NEXT: [[TMP129:%.*]] = bitcast i8** [[TMP128]] to i32* +// CHECK15-NEXT: store i32 [[TMP120]], i32* [[TMP129]], align 4 +// CHECK15-NEXT: [[TMP130:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 1 +// CHECK15-NEXT: [[TMP131:%.*]] = bitcast i8** [[TMP130]] to i32* +// CHECK15-NEXT: store i32 [[TMP120]], i32* [[TMP131]], align 4 +// CHECK15-NEXT: [[TMP132:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS46]], i32 0, i32 1 +// CHECK15-NEXT: store i8* null, i8** [[TMP132]], align 4 +// CHECK15-NEXT: [[TMP133:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 2 +// CHECK15-NEXT: [[TMP134:%.*]] = bitcast i8** [[TMP133]] to i32* +// CHECK15-NEXT: store i32 [[TMP0]], i32* [[TMP134]], align 4 +// CHECK15-NEXT: [[TMP135:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 2 +// CHECK15-NEXT: [[TMP136:%.*]] = bitcast i8** [[TMP135]] to i32* +// CHECK15-NEXT: store i32 [[TMP0]], i32* [[TMP136]], align 4 +// CHECK15-NEXT: [[TMP137:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS46]], i32 0, i32 2 +// CHECK15-NEXT: store i8* null, i8** [[TMP137]], align 4 +// CHECK15-NEXT: [[TMP138:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 3 +// CHECK15-NEXT: [[TMP139:%.*]] = bitcast i8** [[TMP138]] to i32** +// CHECK15-NEXT: store i32* [[VLA]], i32** [[TMP139]], align 4 +// CHECK15-NEXT: [[TMP140:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 3 +// CHECK15-NEXT: [[TMP141:%.*]] = bitcast i8** [[TMP140]] to i32** +// CHECK15-NEXT: store i32* [[VLA]], i32** [[TMP141]], align 4 +// CHECK15-NEXT: store i64 [[TMP122]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.16, i32 0, i32 3), align 4 +// CHECK15-NEXT: [[TMP142:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS46]], i32 0, i32 3 +// CHECK15-NEXT: store i8* null, i8** [[TMP142]], align 4 +// CHECK15-NEXT: [[TMP143:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 0 +// CHECK15-NEXT: [[TMP144:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 0 +// CHECK15-NEXT: [[TMP145:%.*]] = load i32, i32* [[N]], align 4 +// CHECK15-NEXT: store i32 [[TMP145]], i32* [[DOTCAPTURE_EXPR_48]], align 4 +// CHECK15-NEXT: [[TMP146:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_48]], align 4 +// CHECK15-NEXT: [[SUB50:%.*]] = sub nsw i32 [[TMP146]], 0 +// CHECK15-NEXT: [[DIV51:%.*]] = sdiv i32 [[SUB50]], 1 +// CHECK15-NEXT: [[SUB52:%.*]] = sub nsw i32 [[DIV51]], 1 +// CHECK15-NEXT: store i32 [[SUB52]], i32* [[DOTCAPTURE_EXPR_49]], align 4 +// CHECK15-NEXT: [[TMP147:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_49]], align 4 +// CHECK15-NEXT: [[ADD53:%.*]] = add nsw i32 [[TMP147]], 1 +// CHECK15-NEXT: [[TMP148:%.*]] = zext i32 [[ADD53]] to i64 +// CHECK15-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* 
@[[GLOB3]], i64 -1, i64 [[TMP148]]) +// CHECK15-NEXT: [[TMP149:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l168.region_id, i32 4, i8** [[TMP143]], i8** [[TMP144]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.16, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK15-NEXT: [[TMP150:%.*]] = icmp ne i32 [[TMP149]], 0 +// CHECK15-NEXT: br i1 [[TMP150]], label [[OMP_OFFLOAD_FAILED54:%.*]], label [[OMP_OFFLOAD_CONT55:%.*]] +// CHECK15: omp_offload.failed54: +// CHECK15-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l168(i32 [[TMP118]], i32 [[TMP120]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] +// CHECK15-NEXT: br label [[OMP_OFFLOAD_CONT55]] +// CHECK15: omp_offload.cont55: +// CHECK15-NEXT: [[TMP151:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 +// CHECK15-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP151]]) // CHECK15-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK15-NEXT: [[TMP174:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK15-NEXT: call void @llvm.stackrestore(i8* [[TMP174]]) -// CHECK15-NEXT: [[TMP175:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK15-NEXT: ret i32 [[TMP175]] +// CHECK15-NEXT: [[TMP152:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK15-NEXT: call void @llvm.stackrestore(i8* [[TMP152]]) +// CHECK15-NEXT: [[TMP153:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK15-NEXT: ret i32 [[TMP153]] // // // CHECK15-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l148 @@ -13159,11 +13042,11 @@ // CHECK15-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 // CHECK15-NEXT: store i32 [[TMP3]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK15-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32, i32*, i32)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP4]]) +// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32, i32*, i32)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP4]]) // CHECK15-NEXT: ret void // // -// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..5 +// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..6 // CHECK15-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK15-NEXT: entry: // CHECK15-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -13241,7 +13124,7 @@ // CHECK15-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK15-NEXT: store i32 [[TMP19]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK15-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32, i32*, i32)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], i32 [[TMP1]], i32* [[TMP2]], i32 [[TMP20]]) +// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32, i32*, i32)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], i32 [[TMP1]], i32* [[TMP2]], i32 [[TMP20]]) // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK15: omp.inner.for.inc: // CHECK15-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -13283,7 +13166,7 @@ // CHECK15-NEXT: ret void // // -// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..6 +// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..7 // CHECK15-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK15-NEXT: entry: // CHECK15-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -13398,11 +13281,11 @@ // CHECK15-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4 // CHECK15-NEXT: [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4 // CHECK15-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4 -// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32, i32*)* @.omp_outlined..8 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32 [[TMP0]], i32* [[TMP1]]) +// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32, i32*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32 [[TMP0]], i32* [[TMP1]]) // CHECK15-NEXT: ret void // // -// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..8 +// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..10 // CHECK15-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] { // CHECK15-NEXT: entry: // CHECK15-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -13472,7 +13355,7 @@ // CHECK15: omp.inner.for.body: // CHECK15-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 // CHECK15-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 -// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32, i32*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), i32 [[TMP16]], i32 [[TMP17]], i32* [[TMP0]], i32 [[TMP1]], i32* [[TMP2]]) +// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32, i32*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP16]], i32 [[TMP17]], i32* [[TMP0]], i32 [[TMP1]], i32* [[TMP2]]) // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK15: omp.inner.for.inc: // CHECK15-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -13491,7 +13374,7 @@ // CHECK15-NEXT: ret void // // -// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..9 +// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..11 // CHECK15-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] { // CHECK15-NEXT: entry: // CHECK15-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -13609,11 +13492,11 @@ // CHECK15-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 // CHECK15-NEXT: store i32 [[TMP3]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK15-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32, i32*, i32)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP4]]) +// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32, i32*, i32)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP4]]) // CHECK15-NEXT: ret void // // -// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..14 // CHECK15-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK15-NEXT: entry: // CHECK15-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -13689,7 +13572,7 @@ // CHECK15-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK15-NEXT: store i32 [[TMP18]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK15-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32, i32*, i32)* @.omp_outlined..12 to void (i32*, i32*, ...)*), i32 [[TMP16]], i32 [[TMP17]], i32* [[TMP0]], i32 [[TMP1]], i32* [[TMP2]], i32 [[TMP19]]) +// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32, i32*, i32)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i32 [[TMP16]], i32 [[TMP17]], i32* [[TMP0]], i32 [[TMP1]], i32* [[TMP2]], i32 [[TMP19]]) // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK15: omp.inner.for.inc: // CHECK15-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -13708,7 +13591,7 @@ // CHECK15-NEXT: ret void // // -// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..12 +// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..15 // CHECK15-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK15-NEXT: entry: // CHECK15-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -13850,7 +13733,7 @@ // CHECK15-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK15-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK15-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK15-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK15-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.20, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.21, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK15-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK15-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK15: omp_offload.failed: @@ -13868,7 +13751,7 @@ // CHECK15-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0 // CHECK15-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0 // CHECK15-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK15-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l121.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.19, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.20, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK15-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l121.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x 
i64]* @.offload_sizes.24, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.25, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK15-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 // CHECK15-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]] // CHECK15: omp_offload.failed5: @@ -13897,7 +13780,7 @@ // CHECK15-NEXT: [[TMP30:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0 // CHECK15-NEXT: [[TMP31:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0 // CHECK15-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK15-NEXT: [[TMP32:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l126.region_id, i32 2, i8** [[TMP30]], i8** [[TMP31]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.23, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.24, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK15-NEXT: [[TMP32:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l126.region_id, i32 2, i8** [[TMP30]], i8** [[TMP31]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.28, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.29, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK15-NEXT: [[TMP33:%.*]] = icmp ne i32 [[TMP32]], 0 // CHECK15-NEXT: br i1 [[TMP33]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]] // CHECK15: omp_offload.failed11: @@ -13915,7 +13798,7 @@ // CHECK15-NEXT: [[TMP39:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 0 // CHECK15-NEXT: [[TMP40:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 0 // CHECK15-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK15-NEXT: [[TMP41:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l131.region_id, i32 1, i8** [[TMP39]], i8** [[TMP40]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.27, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.28, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK15-NEXT: [[TMP41:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l131.region_id, i32 1, i8** [[TMP39]], i8** [[TMP40]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.32, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.33, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK15-NEXT: [[TMP42:%.*]] = icmp ne i32 [[TMP41]], 0 // CHECK15-NEXT: br i1 [[TMP42]], label [[OMP_OFFLOAD_FAILED17:%.*]], label [[OMP_OFFLOAD_CONT18:%.*]] // CHECK15: omp_offload.failed17: @@ -13944,7 +13827,7 @@ // CHECK15-NEXT: [[TMP55:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0 // CHECK15-NEXT: [[TMP56:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0 // CHECK15-NEXT: call void 
@__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK15-NEXT: [[TMP57:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l136.region_id, i32 2, i8** [[TMP55]], i8** [[TMP56]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.31, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.32, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK15-NEXT: [[TMP57:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l136.region_id, i32 2, i8** [[TMP55]], i8** [[TMP56]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.36, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.37, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK15-NEXT: [[TMP58:%.*]] = icmp ne i32 [[TMP57]], 0 // CHECK15-NEXT: br i1 [[TMP58]], label [[OMP_OFFLOAD_FAILED24:%.*]], label [[OMP_OFFLOAD_CONT25:%.*]] // CHECK15: omp_offload.failed24: @@ -13960,11 +13843,11 @@ // CHECK15-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK15-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 // CHECK15-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 -// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK15-NEXT: ret void // // -// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..14 +// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..18 // CHECK15-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK15-NEXT: entry: // CHECK15-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -14010,7 +13893,7 @@ // CHECK15: omp.inner.for.body: // CHECK15-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 // CHECK15-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 -// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]) +// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..19 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]) // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK15: omp.inner.for.inc: // CHECK15-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -14025,7 +13908,7 @@ // CHECK15-NEXT: ret void // // -// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..15 +// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..19 // CHECK15-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK15-NEXT: entry: // CHECK15-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -14105,11 +13988,11 @@ // CHECK15-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK15-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 // CHECK15-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 -// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..17 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..22 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK15-NEXT: ret void // // -// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..17 +// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..22 // CHECK15-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK15-NEXT: entry: // CHECK15-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -14155,7 +14038,7 @@ // CHECK15: omp.inner.for.body: // CHECK15-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 // CHECK15-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 -// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]) +// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..23 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]) // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK15: omp.inner.for.inc: // CHECK15-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -14170,7 +14053,7 @@ // CHECK15-NEXT: ret void // // -// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..18 +// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..23 // CHECK15-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK15-NEXT: entry: // CHECK15-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -14259,11 +14142,11 @@ // CHECK15-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 // CHECK15-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK15-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..21 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP3]]) +// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..26 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP3]]) // CHECK15-NEXT: ret void // // -// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..21 +// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..26 // CHECK15-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK15-NEXT: entry: // CHECK15-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -14315,7 +14198,7 @@ // CHECK15-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK15-NEXT: store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK15-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..22 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]]) +// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..27 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]]) // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK15: omp.inner.for.inc: // CHECK15-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -14330,7 +14213,7 @@ // CHECK15-NEXT: ret void // // -// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..22 +// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..27 // CHECK15-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK15-NEXT: entry: // CHECK15-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -14432,11 +14315,11 @@ // CHECK15-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK15-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 // CHECK15-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 -// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..25 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..30 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK15-NEXT: ret void // // -// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..25 +// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..30 // CHECK15-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK15-NEXT: entry: // CHECK15-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -14482,7 +14365,7 @@ // CHECK15: omp.inner.for.body: // CHECK15-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 // CHECK15-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 -// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..26 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]) +// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..31 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]) // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK15: omp.inner.for.inc: // CHECK15-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -14497,7 +14380,7 @@ // CHECK15-NEXT: ret void // // -// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..26 +// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..31 // CHECK15-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK15-NEXT: entry: // CHECK15-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -14584,11 +14467,11 @@ // CHECK15-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 // CHECK15-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK15-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..29 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP3]]) +// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..34 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP3]]) // CHECK15-NEXT: ret void // // -// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..29 +// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..34 // CHECK15-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK15-NEXT: entry: // CHECK15-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -14640,7 +14523,7 @@ // CHECK15-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK15-NEXT: store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK15-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..30 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]]) +// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..35 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]]) // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK15: omp.inner.for.inc: // CHECK15-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -14655,7 +14538,7 @@ // CHECK15-NEXT: ret void // // -// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..30 +// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..35 // CHECK15-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK15-NEXT: entry: // CHECK15-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -14751,7 +14634,6 @@ // CHECK16-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK16-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK16-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4 -// CHECK16-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 4 // CHECK16-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK16-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK16-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -14759,36 +14641,32 @@ // CHECK16-NEXT: [[DOTOFFLOAD_BASEPTRS4:%.*]] = alloca [3 x i8*], align 4 // CHECK16-NEXT: [[DOTOFFLOAD_PTRS5:%.*]] = alloca [3 x i8*], align 4 // CHECK16-NEXT: [[DOTOFFLOAD_MAPPERS6:%.*]] = alloca [3 x i8*], align 4 -// CHECK16-NEXT: [[DOTOFFLOAD_SIZES7:%.*]] = alloca [3 x i64], align 4 -// CHECK16-NEXT: [[_TMP8:%.*]] = alloca i32, align 4 +// CHECK16-NEXT: [[_TMP7:%.*]] = alloca i32, align 4 +// CHECK16-NEXT: [[DOTCAPTURE_EXPR_8:%.*]] = alloca i32, align 4 // CHECK16-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4 -// CHECK16-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4 // CHECK16-NEXT: [[M_CASTED:%.*]] = alloca i32, align 4 -// CHECK16-NEXT: [[N_CASTED17:%.*]] = alloca i32, align 4 -// CHECK16-NEXT: [[DOTOFFLOAD_BASEPTRS18:%.*]] = alloca [4 x i8*], align 4 -// CHECK16-NEXT: [[DOTOFFLOAD_PTRS19:%.*]] = alloca [4 x i8*], align 4 -// CHECK16-NEXT: [[DOTOFFLOAD_MAPPERS20:%.*]] = alloca [4 x i8*], align 4 -// CHECK16-NEXT: [[DOTOFFLOAD_SIZES21:%.*]] = alloca [4 x i64], align 4 -// CHECK16-NEXT: [[_TMP22:%.*]] = alloca i32, align 4 -// CHECK16-NEXT: [[DOTCAPTURE_EXPR_23:%.*]] = alloca i32, align 4 -// CHECK16-NEXT: [[DOTCAPTURE_EXPR_24:%.*]] = alloca i32, align 4 -// CHECK16-NEXT: [[N_CASTED31:%.*]] = alloca i32, align 4 -// CHECK16-NEXT: [[DOTOFFLOAD_BASEPTRS32:%.*]] = alloca [3 x i8*], align 4 -// CHECK16-NEXT: [[DOTOFFLOAD_PTRS33:%.*]] = alloca [3 x i8*], align 4 -// CHECK16-NEXT: [[DOTOFFLOAD_MAPPERS34:%.*]] = alloca [3 x i8*], align 4 -// CHECK16-NEXT: [[DOTOFFLOAD_SIZES35:%.*]] = alloca [3 x i64], align 4 -// CHECK16-NEXT: [[_TMP36:%.*]] = alloca i32, align 4 -// CHECK16-NEXT: [[DOTCAPTURE_EXPR_37:%.*]] = alloca i32, align 4 -// CHECK16-NEXT: [[DOTCAPTURE_EXPR_38:%.*]] = alloca i32, align 4 -// CHECK16-NEXT: [[M_CASTED45:%.*]] = alloca i32, align 4 -// CHECK16-NEXT: [[N_CASTED46:%.*]] = alloca i32, align 4 -// CHECK16-NEXT: [[DOTOFFLOAD_BASEPTRS47:%.*]] = alloca [4 x i8*], align 4 -// CHECK16-NEXT: [[DOTOFFLOAD_PTRS48:%.*]] = alloca [4 x i8*], align 4 -// CHECK16-NEXT: [[DOTOFFLOAD_MAPPERS49:%.*]] = alloca [4 x i8*], align 4 -// 
CHECK16-NEXT: [[DOTOFFLOAD_SIZES50:%.*]] = alloca [4 x i64], align 4 -// CHECK16-NEXT: [[_TMP51:%.*]] = alloca i32, align 4 -// CHECK16-NEXT: [[DOTCAPTURE_EXPR_52:%.*]] = alloca i32, align 4 -// CHECK16-NEXT: [[DOTCAPTURE_EXPR_53:%.*]] = alloca i32, align 4 +// CHECK16-NEXT: [[N_CASTED16:%.*]] = alloca i32, align 4 +// CHECK16-NEXT: [[DOTOFFLOAD_BASEPTRS17:%.*]] = alloca [4 x i8*], align 4 +// CHECK16-NEXT: [[DOTOFFLOAD_PTRS18:%.*]] = alloca [4 x i8*], align 4 +// CHECK16-NEXT: [[DOTOFFLOAD_MAPPERS19:%.*]] = alloca [4 x i8*], align 4 +// CHECK16-NEXT: [[_TMP20:%.*]] = alloca i32, align 4 +// CHECK16-NEXT: [[DOTCAPTURE_EXPR_21:%.*]] = alloca i32, align 4 +// CHECK16-NEXT: [[DOTCAPTURE_EXPR_22:%.*]] = alloca i32, align 4 +// CHECK16-NEXT: [[N_CASTED29:%.*]] = alloca i32, align 4 +// CHECK16-NEXT: [[DOTOFFLOAD_BASEPTRS30:%.*]] = alloca [3 x i8*], align 4 +// CHECK16-NEXT: [[DOTOFFLOAD_PTRS31:%.*]] = alloca [3 x i8*], align 4 +// CHECK16-NEXT: [[DOTOFFLOAD_MAPPERS32:%.*]] = alloca [3 x i8*], align 4 +// CHECK16-NEXT: [[_TMP33:%.*]] = alloca i32, align 4 +// CHECK16-NEXT: [[DOTCAPTURE_EXPR_34:%.*]] = alloca i32, align 4 +// CHECK16-NEXT: [[DOTCAPTURE_EXPR_35:%.*]] = alloca i32, align 4 +// CHECK16-NEXT: [[M_CASTED42:%.*]] = alloca i32, align 4 +// CHECK16-NEXT: [[N_CASTED43:%.*]] = alloca i32, align 4 +// CHECK16-NEXT: [[DOTOFFLOAD_BASEPTRS44:%.*]] = alloca [4 x i8*], align 4 +// CHECK16-NEXT: [[DOTOFFLOAD_PTRS45:%.*]] = alloca [4 x i8*], align 4 +// CHECK16-NEXT: [[DOTOFFLOAD_MAPPERS46:%.*]] = alloca [4 x i8*], align 4 +// CHECK16-NEXT: [[_TMP47:%.*]] = alloca i32, align 4 +// CHECK16-NEXT: [[DOTCAPTURE_EXPR_48:%.*]] = alloca i32, align 4 +// CHECK16-NEXT: [[DOTCAPTURE_EXPR_49:%.*]] = alloca i32, align 4 // CHECK16-NEXT: store i32 0, i32* [[RETVAL]], align 4 // CHECK16-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 // CHECK16-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 4 @@ -14810,308 +14688,274 @@ // CHECK16-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK16-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i32* // CHECK16-NEXT: store i32 [[TMP3]], i32* [[TMP9]], align 4 -// CHECK16-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK16-NEXT: store i64 4, i64* [[TMP10]], align 4 -// CHECK16-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK16-NEXT: store i8* null, i8** [[TMP11]], align 4 -// CHECK16-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK16-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32* -// CHECK16-NEXT: store i32 [[TMP0]], i32* [[TMP13]], align 4 -// CHECK16-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK16-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32* -// CHECK16-NEXT: store i32 [[TMP0]], i32* [[TMP15]], align 4 -// CHECK16-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK16-NEXT: store i64 4, i64* [[TMP16]], align 4 -// CHECK16-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK16-NEXT: store i8* null, i8** [[TMP17]], align 4 -// CHECK16-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK16-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* 
[[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK16-NEXT: store i8* null, i8** [[TMP10]], align 4 +// CHECK16-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK16-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i32* +// CHECK16-NEXT: store i32 [[TMP0]], i32* [[TMP12]], align 4 +// CHECK16-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK16-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i32* +// CHECK16-NEXT: store i32 [[TMP0]], i32* [[TMP14]], align 4 +// CHECK16-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK16-NEXT: store i8* null, i8** [[TMP15]], align 4 +// CHECK16-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK16-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** +// CHECK16-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 4 +// CHECK16-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK16-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** // CHECK16-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 4 -// CHECK16-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK16-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** -// CHECK16-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 4 -// CHECK16-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK16-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 4 -// CHECK16-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK16-NEXT: store i8* null, i8** [[TMP23]], align 4 -// CHECK16-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK16-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK16-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK16-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4 -// CHECK16-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK16-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK16-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 +// CHECK16-NEXT: store i64 [[TMP5]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 2), align 4 +// CHECK16-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK16-NEXT: store i8* null, i8** [[TMP20]], align 4 +// CHECK16-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK16-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK16-NEXT: [[TMP23:%.*]] = load i32, i32* [[N]], align 4 +// CHECK16-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK16-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK16-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK16-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK16-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK16-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK16-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK16-NEXT: [[ADD:%.*]] = add nsw 
i32 [[TMP29]], 1 -// CHECK16-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 -// CHECK16-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP30]]) -// CHECK16-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l148.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK16-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 -// CHECK16-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK16-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK16-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], 1 +// CHECK16-NEXT: [[TMP26:%.*]] = zext i32 [[ADD]] to i64 +// CHECK16-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP26]]) +// CHECK16-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l148.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK16-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK16-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK16: omp_offload.failed: // CHECK16-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l148(i32 [[TMP3]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK16-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK16: omp_offload.cont: -// CHECK16-NEXT: [[TMP33:%.*]] = load i32, i32* [[N]], align 4 -// CHECK16-NEXT: store i32 [[TMP33]], i32* [[N_CASTED3]], align 4 -// CHECK16-NEXT: [[TMP34:%.*]] = load i32, i32* [[N_CASTED3]], align 4 -// CHECK16-NEXT: [[TMP35:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK16-NEXT: [[TMP36:%.*]] = sext i32 [[TMP35]] to i64 -// CHECK16-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 -// CHECK16-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i32* -// CHECK16-NEXT: store i32 [[TMP34]], i32* [[TMP38]], align 4 -// CHECK16-NEXT: [[TMP39:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 -// CHECK16-NEXT: [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i32* -// CHECK16-NEXT: store i32 [[TMP34]], i32* [[TMP40]], align 4 -// CHECK16-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 0 -// CHECK16-NEXT: store i64 4, i64* [[TMP41]], align 4 -// CHECK16-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0 +// CHECK16-NEXT: [[TMP29:%.*]] = load i32, i32* [[N]], align 4 +// CHECK16-NEXT: store i32 [[TMP29]], i32* [[N_CASTED3]], align 4 +// CHECK16-NEXT: [[TMP30:%.*]] = load i32, i32* [[N_CASTED3]], align 4 +// CHECK16-NEXT: [[TMP31:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK16-NEXT: [[TMP32:%.*]] = sext i32 [[TMP31]] to i64 +// CHECK16-NEXT: [[TMP33:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 +// CHECK16-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i32* +// CHECK16-NEXT: store i32 [[TMP30]], i32* [[TMP34]], align 4 +// CHECK16-NEXT: [[TMP35:%.*]] = 
getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 +// CHECK16-NEXT: [[TMP36:%.*]] = bitcast i8** [[TMP35]] to i32* +// CHECK16-NEXT: store i32 [[TMP30]], i32* [[TMP36]], align 4 +// CHECK16-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0 +// CHECK16-NEXT: store i8* null, i8** [[TMP37]], align 4 +// CHECK16-NEXT: [[TMP38:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1 +// CHECK16-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i32* +// CHECK16-NEXT: store i32 [[TMP0]], i32* [[TMP39]], align 4 +// CHECK16-NEXT: [[TMP40:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 1 +// CHECK16-NEXT: [[TMP41:%.*]] = bitcast i8** [[TMP40]] to i32* +// CHECK16-NEXT: store i32 [[TMP0]], i32* [[TMP41]], align 4 +// CHECK16-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1 // CHECK16-NEXT: store i8* null, i8** [[TMP42]], align 4 -// CHECK16-NEXT: [[TMP43:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1 -// CHECK16-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32* -// CHECK16-NEXT: store i32 [[TMP0]], i32* [[TMP44]], align 4 -// CHECK16-NEXT: [[TMP45:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 1 -// CHECK16-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32* -// CHECK16-NEXT: store i32 [[TMP0]], i32* [[TMP46]], align 4 -// CHECK16-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 1 -// CHECK16-NEXT: store i64 4, i64* [[TMP47]], align 4 -// CHECK16-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1 -// CHECK16-NEXT: store i8* null, i8** [[TMP48]], align 4 -// CHECK16-NEXT: [[TMP49:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2 -// CHECK16-NEXT: [[TMP50:%.*]] = bitcast i8** [[TMP49]] to i32** -// CHECK16-NEXT: store i32* [[VLA]], i32** [[TMP50]], align 4 -// CHECK16-NEXT: [[TMP51:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 2 -// CHECK16-NEXT: [[TMP52:%.*]] = bitcast i8** [[TMP51]] to i32** -// CHECK16-NEXT: store i32* [[VLA]], i32** [[TMP52]], align 4 -// CHECK16-NEXT: [[TMP53:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 2 -// CHECK16-NEXT: store i64 [[TMP36]], i64* [[TMP53]], align 4 -// CHECK16-NEXT: [[TMP54:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 2 -// CHECK16-NEXT: store i8* null, i8** [[TMP54]], align 4 -// CHECK16-NEXT: [[TMP55:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 -// CHECK16-NEXT: [[TMP56:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 -// CHECK16-NEXT: [[TMP57:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 0 +// CHECK16-NEXT: [[TMP43:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2 +// CHECK16-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32** +// CHECK16-NEXT: store i32* [[VLA]], i32** [[TMP44]], align 4 +// CHECK16-NEXT: [[TMP45:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 2 +// CHECK16-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32** +// CHECK16-NEXT: store i32* [[VLA]], i32** [[TMP46]], align 4 
+// CHECK16-NEXT: store i64 [[TMP32]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 2), align 4 +// CHECK16-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 2 +// CHECK16-NEXT: store i8* null, i8** [[TMP47]], align 4 +// CHECK16-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 +// CHECK16-NEXT: [[TMP49:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 +// CHECK16-NEXT: [[TMP50:%.*]] = load i32, i32* [[N]], align 4 +// CHECK16-NEXT: store i32 [[TMP50]], i32* [[DOTCAPTURE_EXPR_8]], align 4 +// CHECK16-NEXT: [[TMP51:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_8]], align 4 +// CHECK16-NEXT: [[SUB10:%.*]] = sub nsw i32 [[TMP51]], 0 +// CHECK16-NEXT: [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1 +// CHECK16-NEXT: [[SUB12:%.*]] = sub nsw i32 [[DIV11]], 1 +// CHECK16-NEXT: store i32 [[SUB12]], i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK16-NEXT: [[TMP52:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK16-NEXT: [[ADD13:%.*]] = add nsw i32 [[TMP52]], 1 +// CHECK16-NEXT: [[TMP53:%.*]] = zext i32 [[ADD13]] to i64 +// CHECK16-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP53]]) +// CHECK16-NEXT: [[TMP54:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l153.region_id, i32 3, i8** [[TMP48]], i8** [[TMP49]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK16-NEXT: [[TMP55:%.*]] = icmp ne i32 [[TMP54]], 0 +// CHECK16-NEXT: br i1 [[TMP55]], label [[OMP_OFFLOAD_FAILED14:%.*]], label [[OMP_OFFLOAD_CONT15:%.*]] +// CHECK16: omp_offload.failed14: +// CHECK16-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l153(i32 [[TMP30]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] +// CHECK16-NEXT: br label [[OMP_OFFLOAD_CONT15]] +// CHECK16: omp_offload.cont15: +// CHECK16-NEXT: [[TMP56:%.*]] = load i32, i32* [[M]], align 4 +// CHECK16-NEXT: store i32 [[TMP56]], i32* [[M_CASTED]], align 4 +// CHECK16-NEXT: [[TMP57:%.*]] = load i32, i32* [[M_CASTED]], align 4 // CHECK16-NEXT: [[TMP58:%.*]] = load i32, i32* [[N]], align 4 -// CHECK16-NEXT: store i32 [[TMP58]], i32* [[DOTCAPTURE_EXPR_9]], align 4 -// CHECK16-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 -// CHECK16-NEXT: [[SUB11:%.*]] = sub nsw i32 [[TMP59]], 0 -// CHECK16-NEXT: [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1 -// CHECK16-NEXT: [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1 -// CHECK16-NEXT: store i32 [[SUB13]], i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK16-NEXT: [[TMP60:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK16-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP60]], 1 -// CHECK16-NEXT: [[TMP61:%.*]] = zext i32 [[ADD14]] to i64 -// CHECK16-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP61]]) -// CHECK16-NEXT: [[TMP62:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l153.region_id, i32 3, i8** [[TMP55]], i8** [[TMP56]], i64* [[TMP57]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK16-NEXT: [[TMP63:%.*]] = icmp ne i32 
[[TMP62]], 0 -// CHECK16-NEXT: br i1 [[TMP63]], label [[OMP_OFFLOAD_FAILED15:%.*]], label [[OMP_OFFLOAD_CONT16:%.*]] -// CHECK16: omp_offload.failed15: -// CHECK16-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l153(i32 [[TMP34]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] -// CHECK16-NEXT: br label [[OMP_OFFLOAD_CONT16]] -// CHECK16: omp_offload.cont16: -// CHECK16-NEXT: [[TMP64:%.*]] = load i32, i32* [[M]], align 4 -// CHECK16-NEXT: store i32 [[TMP64]], i32* [[M_CASTED]], align 4 -// CHECK16-NEXT: [[TMP65:%.*]] = load i32, i32* [[M_CASTED]], align 4 -// CHECK16-NEXT: [[TMP66:%.*]] = load i32, i32* [[N]], align 4 -// CHECK16-NEXT: store i32 [[TMP66]], i32* [[N_CASTED17]], align 4 -// CHECK16-NEXT: [[TMP67:%.*]] = load i32, i32* [[N_CASTED17]], align 4 -// CHECK16-NEXT: [[TMP68:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK16-NEXT: [[TMP69:%.*]] = sext i32 [[TMP68]] to i64 -// CHECK16-NEXT: [[TMP70:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 0 -// CHECK16-NEXT: [[TMP71:%.*]] = bitcast i8** [[TMP70]] to i32* -// CHECK16-NEXT: store i32 [[TMP65]], i32* [[TMP71]], align 4 -// CHECK16-NEXT: [[TMP72:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 0 +// CHECK16-NEXT: store i32 [[TMP58]], i32* [[N_CASTED16]], align 4 +// CHECK16-NEXT: [[TMP59:%.*]] = load i32, i32* [[N_CASTED16]], align 4 +// CHECK16-NEXT: [[TMP60:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK16-NEXT: [[TMP61:%.*]] = sext i32 [[TMP60]] to i64 +// CHECK16-NEXT: [[TMP62:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 0 +// CHECK16-NEXT: [[TMP63:%.*]] = bitcast i8** [[TMP62]] to i32* +// CHECK16-NEXT: store i32 [[TMP57]], i32* [[TMP63]], align 4 +// CHECK16-NEXT: [[TMP64:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 0 +// CHECK16-NEXT: [[TMP65:%.*]] = bitcast i8** [[TMP64]] to i32* +// CHECK16-NEXT: store i32 [[TMP57]], i32* [[TMP65]], align 4 +// CHECK16-NEXT: [[TMP66:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 0 +// CHECK16-NEXT: store i8* null, i8** [[TMP66]], align 4 +// CHECK16-NEXT: [[TMP67:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 1 +// CHECK16-NEXT: [[TMP68:%.*]] = bitcast i8** [[TMP67]] to i32* +// CHECK16-NEXT: store i32 [[TMP59]], i32* [[TMP68]], align 4 +// CHECK16-NEXT: [[TMP69:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 1 +// CHECK16-NEXT: [[TMP70:%.*]] = bitcast i8** [[TMP69]] to i32* +// CHECK16-NEXT: store i32 [[TMP59]], i32* [[TMP70]], align 4 +// CHECK16-NEXT: [[TMP71:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 1 +// CHECK16-NEXT: store i8* null, i8** [[TMP71]], align 4 +// CHECK16-NEXT: [[TMP72:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 2 // CHECK16-NEXT: [[TMP73:%.*]] = bitcast i8** [[TMP72]] to i32* -// CHECK16-NEXT: store i32 [[TMP65]], i32* [[TMP73]], align 4 -// CHECK16-NEXT: [[TMP74:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES21]], i32 0, i32 0 -// CHECK16-NEXT: store i64 4, i64* [[TMP74]], align 4 -// CHECK16-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 0 -// CHECK16-NEXT: store i8* null, i8** [[TMP75]], align 4 -// CHECK16-NEXT: [[TMP76:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, 
i32 1 -// CHECK16-NEXT: [[TMP77:%.*]] = bitcast i8** [[TMP76]] to i32* -// CHECK16-NEXT: store i32 [[TMP67]], i32* [[TMP77]], align 4 -// CHECK16-NEXT: [[TMP78:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 1 -// CHECK16-NEXT: [[TMP79:%.*]] = bitcast i8** [[TMP78]] to i32* -// CHECK16-NEXT: store i32 [[TMP67]], i32* [[TMP79]], align 4 -// CHECK16-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES21]], i32 0, i32 1 -// CHECK16-NEXT: store i64 4, i64* [[TMP80]], align 4 -// CHECK16-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 1 +// CHECK16-NEXT: store i32 [[TMP0]], i32* [[TMP73]], align 4 +// CHECK16-NEXT: [[TMP74:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 2 +// CHECK16-NEXT: [[TMP75:%.*]] = bitcast i8** [[TMP74]] to i32* +// CHECK16-NEXT: store i32 [[TMP0]], i32* [[TMP75]], align 4 +// CHECK16-NEXT: [[TMP76:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 2 +// CHECK16-NEXT: store i8* null, i8** [[TMP76]], align 4 +// CHECK16-NEXT: [[TMP77:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 3 +// CHECK16-NEXT: [[TMP78:%.*]] = bitcast i8** [[TMP77]] to i32** +// CHECK16-NEXT: store i32* [[VLA]], i32** [[TMP78]], align 4 +// CHECK16-NEXT: [[TMP79:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 3 +// CHECK16-NEXT: [[TMP80:%.*]] = bitcast i8** [[TMP79]] to i32** +// CHECK16-NEXT: store i32* [[VLA]], i32** [[TMP80]], align 4 +// CHECK16-NEXT: store i64 [[TMP61]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 3), align 4 +// CHECK16-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 3 // CHECK16-NEXT: store i8* null, i8** [[TMP81]], align 4 -// CHECK16-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 2 -// CHECK16-NEXT: [[TMP83:%.*]] = bitcast i8** [[TMP82]] to i32* -// CHECK16-NEXT: store i32 [[TMP0]], i32* [[TMP83]], align 4 -// CHECK16-NEXT: [[TMP84:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 2 -// CHECK16-NEXT: [[TMP85:%.*]] = bitcast i8** [[TMP84]] to i32* -// CHECK16-NEXT: store i32 [[TMP0]], i32* [[TMP85]], align 4 -// CHECK16-NEXT: [[TMP86:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES21]], i32 0, i32 2 -// CHECK16-NEXT: store i64 4, i64* [[TMP86]], align 4 -// CHECK16-NEXT: [[TMP87:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 2 -// CHECK16-NEXT: store i8* null, i8** [[TMP87]], align 4 -// CHECK16-NEXT: [[TMP88:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 3 -// CHECK16-NEXT: [[TMP89:%.*]] = bitcast i8** [[TMP88]] to i32** -// CHECK16-NEXT: store i32* [[VLA]], i32** [[TMP89]], align 4 -// CHECK16-NEXT: [[TMP90:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 3 -// CHECK16-NEXT: [[TMP91:%.*]] = bitcast i8** [[TMP90]] to i32** -// CHECK16-NEXT: store i32* [[VLA]], i32** [[TMP91]], align 4 -// CHECK16-NEXT: [[TMP92:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES21]], i32 0, i32 3 -// CHECK16-NEXT: store i64 [[TMP69]], i64* [[TMP92]], align 4 -// CHECK16-NEXT: [[TMP93:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* 
[[DOTOFFLOAD_MAPPERS20]], i32 0, i32 3 -// CHECK16-NEXT: store i8* null, i8** [[TMP93]], align 4 -// CHECK16-NEXT: [[TMP94:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 0 -// CHECK16-NEXT: [[TMP95:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 0 -// CHECK16-NEXT: [[TMP96:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES21]], i32 0, i32 0 -// CHECK16-NEXT: [[TMP97:%.*]] = load i32, i32* [[N]], align 4 -// CHECK16-NEXT: store i32 [[TMP97]], i32* [[DOTCAPTURE_EXPR_23]], align 4 -// CHECK16-NEXT: [[TMP98:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_23]], align 4 -// CHECK16-NEXT: [[SUB25:%.*]] = sub nsw i32 [[TMP98]], 0 -// CHECK16-NEXT: [[DIV26:%.*]] = sdiv i32 [[SUB25]], 1 -// CHECK16-NEXT: [[SUB27:%.*]] = sub nsw i32 [[DIV26]], 1 -// CHECK16-NEXT: store i32 [[SUB27]], i32* [[DOTCAPTURE_EXPR_24]], align 4 -// CHECK16-NEXT: [[TMP99:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_24]], align 4 -// CHECK16-NEXT: [[ADD28:%.*]] = add nsw i32 [[TMP99]], 1 -// CHECK16-NEXT: [[TMP100:%.*]] = zext i32 [[ADD28]] to i64 -// CHECK16-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP100]]) -// CHECK16-NEXT: [[TMP101:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l158.region_id, i32 4, i8** [[TMP94]], i8** [[TMP95]], i64* [[TMP96]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.7, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK16-NEXT: [[TMP102:%.*]] = icmp ne i32 [[TMP101]], 0 -// CHECK16-NEXT: br i1 [[TMP102]], label [[OMP_OFFLOAD_FAILED29:%.*]], label [[OMP_OFFLOAD_CONT30:%.*]] -// CHECK16: omp_offload.failed29: -// CHECK16-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l158(i32 [[TMP65]], i32 [[TMP67]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] -// CHECK16-NEXT: br label [[OMP_OFFLOAD_CONT30]] -// CHECK16: omp_offload.cont30: -// CHECK16-NEXT: [[TMP103:%.*]] = load i32, i32* [[N]], align 4 -// CHECK16-NEXT: store i32 [[TMP103]], i32* [[N_CASTED31]], align 4 -// CHECK16-NEXT: [[TMP104:%.*]] = load i32, i32* [[N_CASTED31]], align 4 -// CHECK16-NEXT: [[TMP105:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK16-NEXT: [[TMP106:%.*]] = sext i32 [[TMP105]] to i64 -// CHECK16-NEXT: [[TMP107:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS32]], i32 0, i32 0 -// CHECK16-NEXT: [[TMP108:%.*]] = bitcast i8** [[TMP107]] to i32* -// CHECK16-NEXT: store i32 [[TMP104]], i32* [[TMP108]], align 4 -// CHECK16-NEXT: [[TMP109:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS33]], i32 0, i32 0 -// CHECK16-NEXT: [[TMP110:%.*]] = bitcast i8** [[TMP109]] to i32* -// CHECK16-NEXT: store i32 [[TMP104]], i32* [[TMP110]], align 4 -// CHECK16-NEXT: [[TMP111:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES35]], i32 0, i32 0 -// CHECK16-NEXT: store i64 4, i64* [[TMP111]], align 4 -// CHECK16-NEXT: [[TMP112:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS34]], i32 0, i32 0 -// CHECK16-NEXT: store i8* null, i8** [[TMP112]], align 4 -// CHECK16-NEXT: [[TMP113:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS32]], i32 0, i32 1 -// CHECK16-NEXT: [[TMP114:%.*]] = bitcast i8** [[TMP113]] to i32* -// CHECK16-NEXT: store i32 [[TMP0]], i32* [[TMP114]], align 4 -// CHECK16-NEXT: [[TMP115:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* 
[[DOTOFFLOAD_PTRS33]], i32 0, i32 1 -// CHECK16-NEXT: [[TMP116:%.*]] = bitcast i8** [[TMP115]] to i32* -// CHECK16-NEXT: store i32 [[TMP0]], i32* [[TMP116]], align 4 -// CHECK16-NEXT: [[TMP117:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES35]], i32 0, i32 1 -// CHECK16-NEXT: store i64 4, i64* [[TMP117]], align 4 -// CHECK16-NEXT: [[TMP118:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS34]], i32 0, i32 1 -// CHECK16-NEXT: store i8* null, i8** [[TMP118]], align 4 -// CHECK16-NEXT: [[TMP119:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS32]], i32 0, i32 2 -// CHECK16-NEXT: [[TMP120:%.*]] = bitcast i8** [[TMP119]] to i32** -// CHECK16-NEXT: store i32* [[VLA]], i32** [[TMP120]], align 4 -// CHECK16-NEXT: [[TMP121:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS33]], i32 0, i32 2 -// CHECK16-NEXT: [[TMP122:%.*]] = bitcast i8** [[TMP121]] to i32** -// CHECK16-NEXT: store i32* [[VLA]], i32** [[TMP122]], align 4 -// CHECK16-NEXT: [[TMP123:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES35]], i32 0, i32 2 -// CHECK16-NEXT: store i64 [[TMP106]], i64* [[TMP123]], align 4 -// CHECK16-NEXT: [[TMP124:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS34]], i32 0, i32 2 -// CHECK16-NEXT: store i8* null, i8** [[TMP124]], align 4 -// CHECK16-NEXT: [[TMP125:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS32]], i32 0, i32 0 -// CHECK16-NEXT: [[TMP126:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS33]], i32 0, i32 0 -// CHECK16-NEXT: [[TMP127:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES35]], i32 0, i32 0 -// CHECK16-NEXT: [[TMP128:%.*]] = load i32, i32* [[N]], align 4 -// CHECK16-NEXT: store i32 [[TMP128]], i32* [[DOTCAPTURE_EXPR_37]], align 4 -// CHECK16-NEXT: [[TMP129:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_37]], align 4 -// CHECK16-NEXT: [[SUB39:%.*]] = sub nsw i32 [[TMP129]], 0 -// CHECK16-NEXT: [[DIV40:%.*]] = sdiv i32 [[SUB39]], 1 -// CHECK16-NEXT: [[SUB41:%.*]] = sub nsw i32 [[DIV40]], 1 -// CHECK16-NEXT: store i32 [[SUB41]], i32* [[DOTCAPTURE_EXPR_38]], align 4 -// CHECK16-NEXT: [[TMP130:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_38]], align 4 -// CHECK16-NEXT: [[ADD42:%.*]] = add nsw i32 [[TMP130]], 1 -// CHECK16-NEXT: [[TMP131:%.*]] = zext i32 [[ADD42]] to i64 -// CHECK16-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP131]]) -// CHECK16-NEXT: [[TMP132:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l163.region_id, i32 3, i8** [[TMP125]], i8** [[TMP126]], i64* [[TMP127]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK16-NEXT: [[TMP133:%.*]] = icmp ne i32 [[TMP132]], 0 -// CHECK16-NEXT: br i1 [[TMP133]], label [[OMP_OFFLOAD_FAILED43:%.*]], label [[OMP_OFFLOAD_CONT44:%.*]] -// CHECK16: omp_offload.failed43: -// CHECK16-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l163(i32 [[TMP104]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] -// CHECK16-NEXT: br label [[OMP_OFFLOAD_CONT44]] -// CHECK16: omp_offload.cont44: -// CHECK16-NEXT: [[TMP134:%.*]] = load i32, i32* [[M]], align 4 -// CHECK16-NEXT: store i32 [[TMP134]], i32* [[M_CASTED45]], align 4 -// CHECK16-NEXT: [[TMP135:%.*]] = load i32, i32* [[M_CASTED45]], align 4 -// CHECK16-NEXT: [[TMP136:%.*]] = load 
i32, i32* [[N]], align 4 -// CHECK16-NEXT: store i32 [[TMP136]], i32* [[N_CASTED46]], align 4 -// CHECK16-NEXT: [[TMP137:%.*]] = load i32, i32* [[N_CASTED46]], align 4 -// CHECK16-NEXT: [[TMP138:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK16-NEXT: [[TMP139:%.*]] = sext i32 [[TMP138]] to i64 -// CHECK16-NEXT: [[TMP140:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS47]], i32 0, i32 0 -// CHECK16-NEXT: [[TMP141:%.*]] = bitcast i8** [[TMP140]] to i32* -// CHECK16-NEXT: store i32 [[TMP135]], i32* [[TMP141]], align 4 -// CHECK16-NEXT: [[TMP142:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS48]], i32 0, i32 0 -// CHECK16-NEXT: [[TMP143:%.*]] = bitcast i8** [[TMP142]] to i32* -// CHECK16-NEXT: store i32 [[TMP135]], i32* [[TMP143]], align 4 -// CHECK16-NEXT: [[TMP144:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES50]], i32 0, i32 0 -// CHECK16-NEXT: store i64 4, i64* [[TMP144]], align 4 -// CHECK16-NEXT: [[TMP145:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS49]], i32 0, i32 0 -// CHECK16-NEXT: store i8* null, i8** [[TMP145]], align 4 -// CHECK16-NEXT: [[TMP146:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS47]], i32 0, i32 1 -// CHECK16-NEXT: [[TMP147:%.*]] = bitcast i8** [[TMP146]] to i32* -// CHECK16-NEXT: store i32 [[TMP137]], i32* [[TMP147]], align 4 -// CHECK16-NEXT: [[TMP148:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS48]], i32 0, i32 1 -// CHECK16-NEXT: [[TMP149:%.*]] = bitcast i8** [[TMP148]] to i32* -// CHECK16-NEXT: store i32 [[TMP137]], i32* [[TMP149]], align 4 -// CHECK16-NEXT: [[TMP150:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES50]], i32 0, i32 1 -// CHECK16-NEXT: store i64 4, i64* [[TMP150]], align 4 -// CHECK16-NEXT: [[TMP151:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS49]], i32 0, i32 1 -// CHECK16-NEXT: store i8* null, i8** [[TMP151]], align 4 -// CHECK16-NEXT: [[TMP152:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS47]], i32 0, i32 2 -// CHECK16-NEXT: [[TMP153:%.*]] = bitcast i8** [[TMP152]] to i32* -// CHECK16-NEXT: store i32 [[TMP0]], i32* [[TMP153]], align 4 -// CHECK16-NEXT: [[TMP154:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS48]], i32 0, i32 2 -// CHECK16-NEXT: [[TMP155:%.*]] = bitcast i8** [[TMP154]] to i32* -// CHECK16-NEXT: store i32 [[TMP0]], i32* [[TMP155]], align 4 -// CHECK16-NEXT: [[TMP156:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES50]], i32 0, i32 2 -// CHECK16-NEXT: store i64 4, i64* [[TMP156]], align 4 -// CHECK16-NEXT: [[TMP157:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS49]], i32 0, i32 2 -// CHECK16-NEXT: store i8* null, i8** [[TMP157]], align 4 -// CHECK16-NEXT: [[TMP158:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS47]], i32 0, i32 3 -// CHECK16-NEXT: [[TMP159:%.*]] = bitcast i8** [[TMP158]] to i32** -// CHECK16-NEXT: store i32* [[VLA]], i32** [[TMP159]], align 4 -// CHECK16-NEXT: [[TMP160:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS48]], i32 0, i32 3 -// CHECK16-NEXT: [[TMP161:%.*]] = bitcast i8** [[TMP160]] to i32** -// CHECK16-NEXT: store i32* [[VLA]], i32** [[TMP161]], align 4 -// CHECK16-NEXT: [[TMP162:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES50]], i32 0, i32 3 -// CHECK16-NEXT: store i64 [[TMP139]], i64* [[TMP162]], align 4 -// CHECK16-NEXT: [[TMP163:%.*]] = 
getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS49]], i32 0, i32 3 -// CHECK16-NEXT: store i8* null, i8** [[TMP163]], align 4 -// CHECK16-NEXT: [[TMP164:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS47]], i32 0, i32 0 -// CHECK16-NEXT: [[TMP165:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS48]], i32 0, i32 0 -// CHECK16-NEXT: [[TMP166:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES50]], i32 0, i32 0 -// CHECK16-NEXT: [[TMP167:%.*]] = load i32, i32* [[N]], align 4 -// CHECK16-NEXT: store i32 [[TMP167]], i32* [[DOTCAPTURE_EXPR_52]], align 4 -// CHECK16-NEXT: [[TMP168:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_52]], align 4 -// CHECK16-NEXT: [[SUB54:%.*]] = sub nsw i32 [[TMP168]], 0 -// CHECK16-NEXT: [[DIV55:%.*]] = sdiv i32 [[SUB54]], 1 -// CHECK16-NEXT: [[SUB56:%.*]] = sub nsw i32 [[DIV55]], 1 -// CHECK16-NEXT: store i32 [[SUB56]], i32* [[DOTCAPTURE_EXPR_53]], align 4 -// CHECK16-NEXT: [[TMP169:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_53]], align 4 -// CHECK16-NEXT: [[ADD57:%.*]] = add nsw i32 [[TMP169]], 1 -// CHECK16-NEXT: [[TMP170:%.*]] = zext i32 [[ADD57]] to i64 -// CHECK16-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP170]]) -// CHECK16-NEXT: [[TMP171:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l168.region_id, i32 4, i8** [[TMP164]], i8** [[TMP165]], i64* [[TMP166]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK16-NEXT: [[TMP172:%.*]] = icmp ne i32 [[TMP171]], 0 -// CHECK16-NEXT: br i1 [[TMP172]], label [[OMP_OFFLOAD_FAILED58:%.*]], label [[OMP_OFFLOAD_CONT59:%.*]] -// CHECK16: omp_offload.failed58: -// CHECK16-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l168(i32 [[TMP135]], i32 [[TMP137]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] -// CHECK16-NEXT: br label [[OMP_OFFLOAD_CONT59]] -// CHECK16: omp_offload.cont59: -// CHECK16-NEXT: [[TMP173:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 -// CHECK16-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP173]]) +// CHECK16-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 0 +// CHECK16-NEXT: [[TMP83:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 0 +// CHECK16-NEXT: [[TMP84:%.*]] = load i32, i32* [[N]], align 4 +// CHECK16-NEXT: store i32 [[TMP84]], i32* [[DOTCAPTURE_EXPR_21]], align 4 +// CHECK16-NEXT: [[TMP85:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_21]], align 4 +// CHECK16-NEXT: [[SUB23:%.*]] = sub nsw i32 [[TMP85]], 0 +// CHECK16-NEXT: [[DIV24:%.*]] = sdiv i32 [[SUB23]], 1 +// CHECK16-NEXT: [[SUB25:%.*]] = sub nsw i32 [[DIV24]], 1 +// CHECK16-NEXT: store i32 [[SUB25]], i32* [[DOTCAPTURE_EXPR_22]], align 4 +// CHECK16-NEXT: [[TMP86:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_22]], align 4 +// CHECK16-NEXT: [[ADD26:%.*]] = add nsw i32 [[TMP86]], 1 +// CHECK16-NEXT: [[TMP87:%.*]] = zext i32 [[ADD26]] to i64 +// CHECK16-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP87]]) +// CHECK16-NEXT: [[TMP88:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l158.region_id, i32 4, i8** [[TMP82]], i8** [[TMP83]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* 
@.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK16-NEXT: [[TMP89:%.*]] = icmp ne i32 [[TMP88]], 0 +// CHECK16-NEXT: br i1 [[TMP89]], label [[OMP_OFFLOAD_FAILED27:%.*]], label [[OMP_OFFLOAD_CONT28:%.*]] +// CHECK16: omp_offload.failed27: +// CHECK16-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l158(i32 [[TMP57]], i32 [[TMP59]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] +// CHECK16-NEXT: br label [[OMP_OFFLOAD_CONT28]] +// CHECK16: omp_offload.cont28: +// CHECK16-NEXT: [[TMP90:%.*]] = load i32, i32* [[N]], align 4 +// CHECK16-NEXT: store i32 [[TMP90]], i32* [[N_CASTED29]], align 4 +// CHECK16-NEXT: [[TMP91:%.*]] = load i32, i32* [[N_CASTED29]], align 4 +// CHECK16-NEXT: [[TMP92:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK16-NEXT: [[TMP93:%.*]] = sext i32 [[TMP92]] to i64 +// CHECK16-NEXT: [[TMP94:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS30]], i32 0, i32 0 +// CHECK16-NEXT: [[TMP95:%.*]] = bitcast i8** [[TMP94]] to i32* +// CHECK16-NEXT: store i32 [[TMP91]], i32* [[TMP95]], align 4 +// CHECK16-NEXT: [[TMP96:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS31]], i32 0, i32 0 +// CHECK16-NEXT: [[TMP97:%.*]] = bitcast i8** [[TMP96]] to i32* +// CHECK16-NEXT: store i32 [[TMP91]], i32* [[TMP97]], align 4 +// CHECK16-NEXT: [[TMP98:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS32]], i32 0, i32 0 +// CHECK16-NEXT: store i8* null, i8** [[TMP98]], align 4 +// CHECK16-NEXT: [[TMP99:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS30]], i32 0, i32 1 +// CHECK16-NEXT: [[TMP100:%.*]] = bitcast i8** [[TMP99]] to i32* +// CHECK16-NEXT: store i32 [[TMP0]], i32* [[TMP100]], align 4 +// CHECK16-NEXT: [[TMP101:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS31]], i32 0, i32 1 +// CHECK16-NEXT: [[TMP102:%.*]] = bitcast i8** [[TMP101]] to i32* +// CHECK16-NEXT: store i32 [[TMP0]], i32* [[TMP102]], align 4 +// CHECK16-NEXT: [[TMP103:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS32]], i32 0, i32 1 +// CHECK16-NEXT: store i8* null, i8** [[TMP103]], align 4 +// CHECK16-NEXT: [[TMP104:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS30]], i32 0, i32 2 +// CHECK16-NEXT: [[TMP105:%.*]] = bitcast i8** [[TMP104]] to i32** +// CHECK16-NEXT: store i32* [[VLA]], i32** [[TMP105]], align 4 +// CHECK16-NEXT: [[TMP106:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS31]], i32 0, i32 2 +// CHECK16-NEXT: [[TMP107:%.*]] = bitcast i8** [[TMP106]] to i32** +// CHECK16-NEXT: store i32* [[VLA]], i32** [[TMP107]], align 4 +// CHECK16-NEXT: store i64 [[TMP93]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.12, i32 0, i32 2), align 4 +// CHECK16-NEXT: [[TMP108:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS32]], i32 0, i32 2 +// CHECK16-NEXT: store i8* null, i8** [[TMP108]], align 4 +// CHECK16-NEXT: [[TMP109:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS30]], i32 0, i32 0 +// CHECK16-NEXT: [[TMP110:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS31]], i32 0, i32 0 +// CHECK16-NEXT: [[TMP111:%.*]] = load i32, i32* [[N]], align 4 +// CHECK16-NEXT: store i32 [[TMP111]], i32* [[DOTCAPTURE_EXPR_34]], align 4 +// CHECK16-NEXT: [[TMP112:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_34]], align 4 +// CHECK16-NEXT: [[SUB36:%.*]] = 
sub nsw i32 [[TMP112]], 0 +// CHECK16-NEXT: [[DIV37:%.*]] = sdiv i32 [[SUB36]], 1 +// CHECK16-NEXT: [[SUB38:%.*]] = sub nsw i32 [[DIV37]], 1 +// CHECK16-NEXT: store i32 [[SUB38]], i32* [[DOTCAPTURE_EXPR_35]], align 4 +// CHECK16-NEXT: [[TMP113:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_35]], align 4 +// CHECK16-NEXT: [[ADD39:%.*]] = add nsw i32 [[TMP113]], 1 +// CHECK16-NEXT: [[TMP114:%.*]] = zext i32 [[ADD39]] to i64 +// CHECK16-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP114]]) +// CHECK16-NEXT: [[TMP115:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l163.region_id, i32 3, i8** [[TMP109]], i8** [[TMP110]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK16-NEXT: [[TMP116:%.*]] = icmp ne i32 [[TMP115]], 0 +// CHECK16-NEXT: br i1 [[TMP116]], label [[OMP_OFFLOAD_FAILED40:%.*]], label [[OMP_OFFLOAD_CONT41:%.*]] +// CHECK16: omp_offload.failed40: +// CHECK16-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l163(i32 [[TMP91]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] +// CHECK16-NEXT: br label [[OMP_OFFLOAD_CONT41]] +// CHECK16: omp_offload.cont41: +// CHECK16-NEXT: [[TMP117:%.*]] = load i32, i32* [[M]], align 4 +// CHECK16-NEXT: store i32 [[TMP117]], i32* [[M_CASTED42]], align 4 +// CHECK16-NEXT: [[TMP118:%.*]] = load i32, i32* [[M_CASTED42]], align 4 +// CHECK16-NEXT: [[TMP119:%.*]] = load i32, i32* [[N]], align 4 +// CHECK16-NEXT: store i32 [[TMP119]], i32* [[N_CASTED43]], align 4 +// CHECK16-NEXT: [[TMP120:%.*]] = load i32, i32* [[N_CASTED43]], align 4 +// CHECK16-NEXT: [[TMP121:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK16-NEXT: [[TMP122:%.*]] = sext i32 [[TMP121]] to i64 +// CHECK16-NEXT: [[TMP123:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 0 +// CHECK16-NEXT: [[TMP124:%.*]] = bitcast i8** [[TMP123]] to i32* +// CHECK16-NEXT: store i32 [[TMP118]], i32* [[TMP124]], align 4 +// CHECK16-NEXT: [[TMP125:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 0 +// CHECK16-NEXT: [[TMP126:%.*]] = bitcast i8** [[TMP125]] to i32* +// CHECK16-NEXT: store i32 [[TMP118]], i32* [[TMP126]], align 4 +// CHECK16-NEXT: [[TMP127:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS46]], i32 0, i32 0 +// CHECK16-NEXT: store i8* null, i8** [[TMP127]], align 4 +// CHECK16-NEXT: [[TMP128:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 1 +// CHECK16-NEXT: [[TMP129:%.*]] = bitcast i8** [[TMP128]] to i32* +// CHECK16-NEXT: store i32 [[TMP120]], i32* [[TMP129]], align 4 +// CHECK16-NEXT: [[TMP130:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 1 +// CHECK16-NEXT: [[TMP131:%.*]] = bitcast i8** [[TMP130]] to i32* +// CHECK16-NEXT: store i32 [[TMP120]], i32* [[TMP131]], align 4 +// CHECK16-NEXT: [[TMP132:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS46]], i32 0, i32 1 +// CHECK16-NEXT: store i8* null, i8** [[TMP132]], align 4 +// CHECK16-NEXT: [[TMP133:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 2 +// CHECK16-NEXT: [[TMP134:%.*]] = bitcast i8** [[TMP133]] to i32* +// CHECK16-NEXT: store i32 [[TMP0]], i32* [[TMP134]], align 4 +// CHECK16-NEXT: 
[[TMP135:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 2 +// CHECK16-NEXT: [[TMP136:%.*]] = bitcast i8** [[TMP135]] to i32* +// CHECK16-NEXT: store i32 [[TMP0]], i32* [[TMP136]], align 4 +// CHECK16-NEXT: [[TMP137:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS46]], i32 0, i32 2 +// CHECK16-NEXT: store i8* null, i8** [[TMP137]], align 4 +// CHECK16-NEXT: [[TMP138:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 3 +// CHECK16-NEXT: [[TMP139:%.*]] = bitcast i8** [[TMP138]] to i32** +// CHECK16-NEXT: store i32* [[VLA]], i32** [[TMP139]], align 4 +// CHECK16-NEXT: [[TMP140:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 3 +// CHECK16-NEXT: [[TMP141:%.*]] = bitcast i8** [[TMP140]] to i32** +// CHECK16-NEXT: store i32* [[VLA]], i32** [[TMP141]], align 4 +// CHECK16-NEXT: store i64 [[TMP122]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.16, i32 0, i32 3), align 4 +// CHECK16-NEXT: [[TMP142:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS46]], i32 0, i32 3 +// CHECK16-NEXT: store i8* null, i8** [[TMP142]], align 4 +// CHECK16-NEXT: [[TMP143:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 0 +// CHECK16-NEXT: [[TMP144:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 0 +// CHECK16-NEXT: [[TMP145:%.*]] = load i32, i32* [[N]], align 4 +// CHECK16-NEXT: store i32 [[TMP145]], i32* [[DOTCAPTURE_EXPR_48]], align 4 +// CHECK16-NEXT: [[TMP146:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_48]], align 4 +// CHECK16-NEXT: [[SUB50:%.*]] = sub nsw i32 [[TMP146]], 0 +// CHECK16-NEXT: [[DIV51:%.*]] = sdiv i32 [[SUB50]], 1 +// CHECK16-NEXT: [[SUB52:%.*]] = sub nsw i32 [[DIV51]], 1 +// CHECK16-NEXT: store i32 [[SUB52]], i32* [[DOTCAPTURE_EXPR_49]], align 4 +// CHECK16-NEXT: [[TMP147:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_49]], align 4 +// CHECK16-NEXT: [[ADD53:%.*]] = add nsw i32 [[TMP147]], 1 +// CHECK16-NEXT: [[TMP148:%.*]] = zext i32 [[ADD53]] to i64 +// CHECK16-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP148]]) +// CHECK16-NEXT: [[TMP149:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l168.region_id, i32 4, i8** [[TMP143]], i8** [[TMP144]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.16, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK16-NEXT: [[TMP150:%.*]] = icmp ne i32 [[TMP149]], 0 +// CHECK16-NEXT: br i1 [[TMP150]], label [[OMP_OFFLOAD_FAILED54:%.*]], label [[OMP_OFFLOAD_CONT55:%.*]] +// CHECK16: omp_offload.failed54: +// CHECK16-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l168(i32 [[TMP118]], i32 [[TMP120]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] +// CHECK16-NEXT: br label [[OMP_OFFLOAD_CONT55]] +// CHECK16: omp_offload.cont55: +// CHECK16-NEXT: [[TMP151:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 +// CHECK16-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP151]]) // CHECK16-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK16-NEXT: [[TMP174:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK16-NEXT: call void @llvm.stackrestore(i8* [[TMP174]]) -// CHECK16-NEXT: [[TMP175:%.*]] = load i32, i32* [[RETVAL]], align 4 -// 
CHECK16-NEXT: ret i32 [[TMP175]] +// CHECK16-NEXT: [[TMP152:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK16-NEXT: call void @llvm.stackrestore(i8* [[TMP152]]) +// CHECK16-NEXT: [[TMP153:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK16-NEXT: ret i32 [[TMP153]] // // // CHECK16-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l148 @@ -15546,11 +15390,11 @@ // CHECK16-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 // CHECK16-NEXT: store i32 [[TMP3]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK16-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32, i32*, i32)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP4]]) +// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32, i32*, i32)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP4]]) // CHECK16-NEXT: ret void // // -// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..5 +// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..6 // CHECK16-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK16-NEXT: entry: // CHECK16-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -15628,7 +15472,7 @@ // CHECK16-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK16-NEXT: store i32 [[TMP19]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK16-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32, i32*, i32)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], i32 [[TMP1]], i32* [[TMP2]], i32 [[TMP20]]) +// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32, i32*, i32)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], i32 [[TMP1]], i32* [[TMP2]], i32 [[TMP20]]) // CHECK16-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK16: omp.inner.for.inc: // CHECK16-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -15670,7 +15514,7 @@ // CHECK16-NEXT: ret void // // -// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..6 +// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..7 // CHECK16-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK16-NEXT: entry: // CHECK16-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -15785,11 +15629,11 @@ // CHECK16-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4 // CHECK16-NEXT: [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4 // CHECK16-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4 -// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32, i32*)* @.omp_outlined..8 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32 [[TMP0]], i32* [[TMP1]]) +// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32, i32*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32 [[TMP0]], i32* [[TMP1]]) // CHECK16-NEXT: ret void // // -// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..8 +// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..10 // CHECK16-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] { // CHECK16-NEXT: entry: // CHECK16-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -15859,7 +15703,7 @@ // CHECK16: omp.inner.for.body: // CHECK16-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 // CHECK16-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 -// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32, i32*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), i32 [[TMP16]], i32 [[TMP17]], i32* [[TMP0]], i32 [[TMP1]], i32* [[TMP2]]) +// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32, i32*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP16]], i32 [[TMP17]], i32* [[TMP0]], i32 [[TMP1]], i32* [[TMP2]]) // CHECK16-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK16: omp.inner.for.inc: // CHECK16-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -15878,7 +15722,7 @@ // CHECK16-NEXT: ret void // // -// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..9 +// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..11 // CHECK16-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] { // CHECK16-NEXT: entry: // CHECK16-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -15996,11 +15840,11 @@ // CHECK16-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 // CHECK16-NEXT: store i32 [[TMP3]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK16-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32, i32*, i32)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP4]]) +// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32, i32*, i32)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP4]]) // CHECK16-NEXT: ret void // // -// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..14 // CHECK16-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK16-NEXT: entry: // CHECK16-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -16076,7 +15920,7 @@ // CHECK16-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK16-NEXT: store i32 [[TMP18]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK16-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32, i32*, i32)* @.omp_outlined..12 to void (i32*, i32*, ...)*), i32 [[TMP16]], i32 [[TMP17]], i32* [[TMP0]], i32 [[TMP1]], i32* [[TMP2]], i32 [[TMP19]]) +// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32, i32*, i32)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i32 [[TMP16]], i32 [[TMP17]], i32* [[TMP0]], i32 [[TMP1]], i32* [[TMP2]], i32 [[TMP19]]) // CHECK16-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK16: omp.inner.for.inc: // CHECK16-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -16095,7 +15939,7 @@ // CHECK16-NEXT: ret void // // -// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..12 +// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..15 // CHECK16-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK16-NEXT: entry: // CHECK16-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -16237,7 +16081,7 @@ // CHECK16-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK16-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK16-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK16-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK16-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.20, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.21, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK16-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK16-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK16: omp_offload.failed: @@ -16255,7 +16099,7 @@ // CHECK16-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0 // CHECK16-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0 // CHECK16-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK16-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l121.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.19, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.20, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK16-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l121.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x 
i64]* @.offload_sizes.24, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.25, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK16-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 // CHECK16-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]] // CHECK16: omp_offload.failed5: @@ -16284,7 +16128,7 @@ // CHECK16-NEXT: [[TMP30:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0 // CHECK16-NEXT: [[TMP31:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0 // CHECK16-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK16-NEXT: [[TMP32:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l126.region_id, i32 2, i8** [[TMP30]], i8** [[TMP31]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.23, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.24, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK16-NEXT: [[TMP32:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l126.region_id, i32 2, i8** [[TMP30]], i8** [[TMP31]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.28, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.29, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK16-NEXT: [[TMP33:%.*]] = icmp ne i32 [[TMP32]], 0 // CHECK16-NEXT: br i1 [[TMP33]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]] // CHECK16: omp_offload.failed11: @@ -16302,7 +16146,7 @@ // CHECK16-NEXT: [[TMP39:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 0 // CHECK16-NEXT: [[TMP40:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 0 // CHECK16-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK16-NEXT: [[TMP41:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l131.region_id, i32 1, i8** [[TMP39]], i8** [[TMP40]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.27, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.28, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK16-NEXT: [[TMP41:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l131.region_id, i32 1, i8** [[TMP39]], i8** [[TMP40]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.32, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.33, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK16-NEXT: [[TMP42:%.*]] = icmp ne i32 [[TMP41]], 0 // CHECK16-NEXT: br i1 [[TMP42]], label [[OMP_OFFLOAD_FAILED17:%.*]], label [[OMP_OFFLOAD_CONT18:%.*]] // CHECK16: omp_offload.failed17: @@ -16331,7 +16175,7 @@ // CHECK16-NEXT: [[TMP55:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0 // CHECK16-NEXT: [[TMP56:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0 // CHECK16-NEXT: call void 
@__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK16-NEXT: [[TMP57:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l136.region_id, i32 2, i8** [[TMP55]], i8** [[TMP56]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.31, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.32, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK16-NEXT: [[TMP57:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l136.region_id, i32 2, i8** [[TMP55]], i8** [[TMP56]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.36, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.37, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK16-NEXT: [[TMP58:%.*]] = icmp ne i32 [[TMP57]], 0 // CHECK16-NEXT: br i1 [[TMP58]], label [[OMP_OFFLOAD_FAILED24:%.*]], label [[OMP_OFFLOAD_CONT25:%.*]] // CHECK16: omp_offload.failed24: @@ -16347,11 +16191,11 @@ // CHECK16-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK16-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 // CHECK16-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 -// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK16-NEXT: ret void // // -// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..14 +// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..18 // CHECK16-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK16-NEXT: entry: // CHECK16-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -16397,7 +16241,7 @@ // CHECK16: omp.inner.for.body: // CHECK16-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 // CHECK16-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 -// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]) +// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..19 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]) // CHECK16-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK16: omp.inner.for.inc: // CHECK16-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -16412,7 +16256,7 @@ // CHECK16-NEXT: ret void // // -// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..15 +// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..19 // CHECK16-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK16-NEXT: entry: // CHECK16-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -16492,11 +16336,11 @@ // CHECK16-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK16-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 // CHECK16-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 -// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..17 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..22 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK16-NEXT: ret void // // -// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..17 +// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..22 // CHECK16-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK16-NEXT: entry: // CHECK16-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -16542,7 +16386,7 @@ // CHECK16: omp.inner.for.body: // CHECK16-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 // CHECK16-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 -// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]) +// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..23 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]) // CHECK16-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK16: omp.inner.for.inc: // CHECK16-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -16557,7 +16401,7 @@ // CHECK16-NEXT: ret void // // -// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..18 +// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..23 // CHECK16-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK16-NEXT: entry: // CHECK16-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -16646,11 +16490,11 @@ // CHECK16-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 // CHECK16-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK16-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..21 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP3]]) +// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..26 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP3]]) // CHECK16-NEXT: ret void // // -// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..21 +// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..26 // CHECK16-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK16-NEXT: entry: // CHECK16-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -16702,7 +16546,7 @@ // CHECK16-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK16-NEXT: store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK16-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..22 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]]) +// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..27 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]]) // CHECK16-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK16: omp.inner.for.inc: // CHECK16-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -16717,7 +16561,7 @@ // CHECK16-NEXT: ret void // // -// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..22 +// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..27 // CHECK16-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK16-NEXT: entry: // CHECK16-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -16819,11 +16663,11 @@ // CHECK16-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK16-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 // CHECK16-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 -// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..25 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..30 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK16-NEXT: ret void // // -// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..25 +// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..30 // CHECK16-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK16-NEXT: entry: // CHECK16-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -16869,7 +16713,7 @@ // CHECK16: omp.inner.for.body: // CHECK16-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 // CHECK16-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 -// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..26 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]) +// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..31 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]) // CHECK16-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK16: omp.inner.for.inc: // CHECK16-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -16884,7 +16728,7 @@ // CHECK16-NEXT: ret void // // -// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..26 +// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..31 // CHECK16-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK16-NEXT: entry: // CHECK16-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -16971,11 +16815,11 @@ // CHECK16-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 // CHECK16-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK16-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..29 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP3]]) +// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..34 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP3]]) // CHECK16-NEXT: ret void // // -// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..29 +// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..34 // CHECK16-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK16-NEXT: entry: // CHECK16-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -17027,7 +16871,7 @@ // CHECK16-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK16-NEXT: store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK16-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..30 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]]) +// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..35 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]]) // CHECK16-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK16: omp.inner.for.inc: // CHECK16-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -17042,7 +16886,7 @@ // CHECK16-NEXT: ret void // // -// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..30 +// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..35 // CHECK16-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK16-NEXT: entry: // CHECK16-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -17138,7 +16982,6 @@ // CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK17-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK17-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8 -// CHECK17-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 8 // CHECK17-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK17-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK17-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -17146,36 +16989,32 @@ // CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS5:%.*]] = alloca [3 x i8*], align 8 // CHECK17-NEXT: [[DOTOFFLOAD_PTRS6:%.*]] = alloca [3 x i8*], align 8 // CHECK17-NEXT: [[DOTOFFLOAD_MAPPERS7:%.*]] = alloca [3 x i8*], align 8 -// CHECK17-NEXT: [[DOTOFFLOAD_SIZES8:%.*]] = alloca [3 x i64], align 8 -// CHECK17-NEXT: [[_TMP9:%.*]] = alloca i32, align 4 +// CHECK17-NEXT: [[_TMP8:%.*]] = alloca i32, align 4 +// CHECK17-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4 // CHECK17-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4 -// CHECK17-NEXT: [[DOTCAPTURE_EXPR_11:%.*]] = alloca i32, align 4 // CHECK17-NEXT: [[M_CASTED:%.*]] = alloca i64, align 8 -// CHECK17-NEXT: [[N_CASTED19:%.*]] = alloca i64, align 8 -// CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS21:%.*]] = alloca [4 x i8*], align 8 -// CHECK17-NEXT: [[DOTOFFLOAD_PTRS22:%.*]] = alloca [4 x i8*], align 8 -// CHECK17-NEXT: [[DOTOFFLOAD_MAPPERS23:%.*]] = alloca [4 x i8*], align 8 -// CHECK17-NEXT: [[DOTOFFLOAD_SIZES24:%.*]] = alloca [4 x i64], align 8 -// CHECK17-NEXT: [[_TMP25:%.*]] = alloca i32, align 4 -// CHECK17-NEXT: [[DOTCAPTURE_EXPR_26:%.*]] = alloca i32, align 4 -// CHECK17-NEXT: [[DOTCAPTURE_EXPR_27:%.*]] = alloca i32, align 4 -// CHECK17-NEXT: [[N_CASTED34:%.*]] = alloca i64, align 8 -// CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS36:%.*]] = alloca [3 x i8*], align 8 -// CHECK17-NEXT: [[DOTOFFLOAD_PTRS37:%.*]] = alloca [3 x i8*], align 8 -// CHECK17-NEXT: [[DOTOFFLOAD_MAPPERS38:%.*]] = alloca [3 x i8*], align 8 -// CHECK17-NEXT: [[DOTOFFLOAD_SIZES39:%.*]] = alloca [3 x i64], align 8 -// CHECK17-NEXT: [[_TMP40:%.*]] = alloca i32, align 4 -// CHECK17-NEXT: [[DOTCAPTURE_EXPR_41:%.*]] = alloca i32, align 4 -// CHECK17-NEXT: [[DOTCAPTURE_EXPR_42:%.*]] = alloca i32, align 4 -// CHECK17-NEXT: [[M_CASTED49:%.*]] = alloca i64, align 8 -// CHECK17-NEXT: [[N_CASTED51:%.*]] = alloca i64, align 8 -// CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS53:%.*]] = alloca [4 x i8*], align 8 -// CHECK17-NEXT: [[DOTOFFLOAD_PTRS54:%.*]] = alloca [4 x i8*], align 8 -// CHECK17-NEXT: [[DOTOFFLOAD_MAPPERS55:%.*]] = alloca [4 x i8*], align 8 
-// CHECK17-NEXT: [[DOTOFFLOAD_SIZES56:%.*]] = alloca [4 x i64], align 8 -// CHECK17-NEXT: [[_TMP57:%.*]] = alloca i32, align 4 -// CHECK17-NEXT: [[DOTCAPTURE_EXPR_58:%.*]] = alloca i32, align 4 -// CHECK17-NEXT: [[DOTCAPTURE_EXPR_59:%.*]] = alloca i32, align 4 +// CHECK17-NEXT: [[N_CASTED18:%.*]] = alloca i64, align 8 +// CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS20:%.*]] = alloca [4 x i8*], align 8 +// CHECK17-NEXT: [[DOTOFFLOAD_PTRS21:%.*]] = alloca [4 x i8*], align 8 +// CHECK17-NEXT: [[DOTOFFLOAD_MAPPERS22:%.*]] = alloca [4 x i8*], align 8 +// CHECK17-NEXT: [[_TMP23:%.*]] = alloca i32, align 4 +// CHECK17-NEXT: [[DOTCAPTURE_EXPR_24:%.*]] = alloca i32, align 4 +// CHECK17-NEXT: [[DOTCAPTURE_EXPR_25:%.*]] = alloca i32, align 4 +// CHECK17-NEXT: [[N_CASTED32:%.*]] = alloca i64, align 8 +// CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS34:%.*]] = alloca [3 x i8*], align 8 +// CHECK17-NEXT: [[DOTOFFLOAD_PTRS35:%.*]] = alloca [3 x i8*], align 8 +// CHECK17-NEXT: [[DOTOFFLOAD_MAPPERS36:%.*]] = alloca [3 x i8*], align 8 +// CHECK17-NEXT: [[_TMP37:%.*]] = alloca i32, align 4 +// CHECK17-NEXT: [[DOTCAPTURE_EXPR_38:%.*]] = alloca i32, align 4 +// CHECK17-NEXT: [[DOTCAPTURE_EXPR_39:%.*]] = alloca i32, align 4 +// CHECK17-NEXT: [[M_CASTED46:%.*]] = alloca i64, align 8 +// CHECK17-NEXT: [[N_CASTED48:%.*]] = alloca i64, align 8 +// CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS50:%.*]] = alloca [4 x i8*], align 8 +// CHECK17-NEXT: [[DOTOFFLOAD_PTRS51:%.*]] = alloca [4 x i8*], align 8 +// CHECK17-NEXT: [[DOTOFFLOAD_MAPPERS52:%.*]] = alloca [4 x i8*], align 8 +// CHECK17-NEXT: [[_TMP53:%.*]] = alloca i32, align 4 +// CHECK17-NEXT: [[DOTCAPTURE_EXPR_54:%.*]] = alloca i32, align 4 +// CHECK17-NEXT: [[DOTCAPTURE_EXPR_55:%.*]] = alloca i32, align 4 // CHECK17-NEXT: store i32 0, i32* [[RETVAL]], align 4 // CHECK17-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 // CHECK17-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 8 @@ -17198,310 +17037,276 @@ // CHECK17-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK17-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i64* // CHECK17-NEXT: store i64 [[TMP4]], i64* [[TMP9]], align 8 -// CHECK17-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK17-NEXT: store i64 4, i64* [[TMP10]], align 8 -// CHECK17-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK17-NEXT: store i8* null, i8** [[TMP11]], align 8 -// CHECK17-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK17-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64* -// CHECK17-NEXT: store i64 [[TMP1]], i64* [[TMP13]], align 8 -// CHECK17-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK17-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64* -// CHECK17-NEXT: store i64 [[TMP1]], i64* [[TMP15]], align 8 -// CHECK17-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK17-NEXT: store i64 8, i64* [[TMP16]], align 8 -// CHECK17-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK17-NEXT: store i8* null, i8** [[TMP17]], align 8 -// CHECK17-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK17-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* 
[[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK17-NEXT: store i8* null, i8** [[TMP10]], align 8 +// CHECK17-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK17-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i64* +// CHECK17-NEXT: store i64 [[TMP1]], i64* [[TMP12]], align 8 +// CHECK17-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK17-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i64* +// CHECK17-NEXT: store i64 [[TMP1]], i64* [[TMP14]], align 8 +// CHECK17-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK17-NEXT: store i8* null, i8** [[TMP15]], align 8 +// CHECK17-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK17-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** +// CHECK17-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 8 +// CHECK17-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK17-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** // CHECK17-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 8 -// CHECK17-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK17-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** -// CHECK17-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 8 -// CHECK17-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK17-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 8 -// CHECK17-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK17-NEXT: store i8* null, i8** [[TMP23]], align 8 -// CHECK17-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4 -// CHECK17-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK17-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK17-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 +// CHECK17-NEXT: store i64 [[TMP5]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 2), align 8 +// CHECK17-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK17-NEXT: store i8* null, i8** [[TMP20]], align 8 +// CHECK17-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP23:%.*]] = load i32, i32* [[N]], align 4 +// CHECK17-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK17-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK17-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK17-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK17-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK17-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK17-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK17-NEXT: [[ADD:%.*]] = add nsw 
i32 [[TMP29]], 1 -// CHECK17-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 -// CHECK17-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP30]]) -// CHECK17-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l148.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK17-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 -// CHECK17-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK17-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], 1 +// CHECK17-NEXT: [[TMP26:%.*]] = zext i32 [[ADD]] to i64 +// CHECK17-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP26]]) +// CHECK17-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l148.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK17-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK17-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK17: omp_offload.failed: // CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l148(i64 [[TMP4]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK17: omp_offload.cont: -// CHECK17-NEXT: [[TMP33:%.*]] = load i32, i32* [[N]], align 4 +// CHECK17-NEXT: [[TMP29:%.*]] = load i32, i32* [[N]], align 4 // CHECK17-NEXT: [[CONV4:%.*]] = bitcast i64* [[N_CASTED3]] to i32* -// CHECK17-NEXT: store i32 [[TMP33]], i32* [[CONV4]], align 4 -// CHECK17-NEXT: [[TMP34:%.*]] = load i64, i64* [[N_CASTED3]], align 8 -// CHECK17-NEXT: [[TMP35:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK17-NEXT: [[TMP36:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i64* -// CHECK17-NEXT: store i64 [[TMP34]], i64* [[TMP37]], align 8 -// CHECK17-NEXT: [[TMP38:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i64* -// CHECK17-NEXT: store i64 [[TMP34]], i64* [[TMP39]], align 8 -// CHECK17-NEXT: [[TMP40:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 0 -// CHECK17-NEXT: store i64 4, i64* [[TMP40]], align 8 -// CHECK17-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 0 +// CHECK17-NEXT: store i32 [[TMP29]], i32* [[CONV4]], align 4 +// CHECK17-NEXT: [[TMP30:%.*]] = load i64, i64* [[N_CASTED3]], align 8 +// CHECK17-NEXT: [[TMP31:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK17-NEXT: [[TMP32:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i64* +// CHECK17-NEXT: store i64 [[TMP30]], i64* [[TMP33]], align 8 +// CHECK17-NEXT: [[TMP34:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* 
[[DOTOFFLOAD_PTRS6]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i64* +// CHECK17-NEXT: store i64 [[TMP30]], i64* [[TMP35]], align 8 +// CHECK17-NEXT: [[TMP36:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 0 +// CHECK17-NEXT: store i8* null, i8** [[TMP36]], align 8 +// CHECK17-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1 +// CHECK17-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i64* +// CHECK17-NEXT: store i64 [[TMP1]], i64* [[TMP38]], align 8 +// CHECK17-NEXT: [[TMP39:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 1 +// CHECK17-NEXT: [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i64* +// CHECK17-NEXT: store i64 [[TMP1]], i64* [[TMP40]], align 8 +// CHECK17-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 1 // CHECK17-NEXT: store i8* null, i8** [[TMP41]], align 8 -// CHECK17-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1 -// CHECK17-NEXT: [[TMP43:%.*]] = bitcast i8** [[TMP42]] to i64* -// CHECK17-NEXT: store i64 [[TMP1]], i64* [[TMP43]], align 8 -// CHECK17-NEXT: [[TMP44:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 1 -// CHECK17-NEXT: [[TMP45:%.*]] = bitcast i8** [[TMP44]] to i64* -// CHECK17-NEXT: store i64 [[TMP1]], i64* [[TMP45]], align 8 -// CHECK17-NEXT: [[TMP46:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 1 -// CHECK17-NEXT: store i64 8, i64* [[TMP46]], align 8 -// CHECK17-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 1 -// CHECK17-NEXT: store i8* null, i8** [[TMP47]], align 8 -// CHECK17-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 2 -// CHECK17-NEXT: [[TMP49:%.*]] = bitcast i8** [[TMP48]] to i32** -// CHECK17-NEXT: store i32* [[VLA]], i32** [[TMP49]], align 8 -// CHECK17-NEXT: [[TMP50:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 2 -// CHECK17-NEXT: [[TMP51:%.*]] = bitcast i8** [[TMP50]] to i32** -// CHECK17-NEXT: store i32* [[VLA]], i32** [[TMP51]], align 8 -// CHECK17-NEXT: [[TMP52:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 2 -// CHECK17-NEXT: store i64 [[TMP35]], i64* [[TMP52]], align 8 -// CHECK17-NEXT: [[TMP53:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 2 -// CHECK17-NEXT: store i8* null, i8** [[TMP53]], align 8 -// CHECK17-NEXT: [[TMP54:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP55:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP56:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 2 +// CHECK17-NEXT: [[TMP43:%.*]] = bitcast i8** [[TMP42]] to i32** +// CHECK17-NEXT: store i32* [[VLA]], i32** [[TMP43]], align 8 +// CHECK17-NEXT: [[TMP44:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 2 +// CHECK17-NEXT: [[TMP45:%.*]] = bitcast i8** [[TMP44]] to i32** +// CHECK17-NEXT: store i32* [[VLA]], i32** [[TMP45]], align 8 +// CHECK17-NEXT: store i64 [[TMP31]], i64* 
getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 2), align 8 +// CHECK17-NEXT: [[TMP46:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 2 +// CHECK17-NEXT: store i8* null, i8** [[TMP46]], align 8 +// CHECK17-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP49:%.*]] = load i32, i32* [[N]], align 4 +// CHECK17-NEXT: store i32 [[TMP49]], i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK17-NEXT: [[TMP50:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK17-NEXT: [[SUB11:%.*]] = sub nsw i32 [[TMP50]], 0 +// CHECK17-NEXT: [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1 +// CHECK17-NEXT: [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1 +// CHECK17-NEXT: store i32 [[SUB13]], i32* [[DOTCAPTURE_EXPR_10]], align 4 +// CHECK17-NEXT: [[TMP51:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 +// CHECK17-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP51]], 1 +// CHECK17-NEXT: [[TMP52:%.*]] = zext i32 [[ADD14]] to i64 +// CHECK17-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP52]]) +// CHECK17-NEXT: [[TMP53:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l153.region_id, i32 3, i8** [[TMP47]], i8** [[TMP48]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK17-NEXT: [[TMP54:%.*]] = icmp ne i32 [[TMP53]], 0 +// CHECK17-NEXT: br i1 [[TMP54]], label [[OMP_OFFLOAD_FAILED15:%.*]], label [[OMP_OFFLOAD_CONT16:%.*]] +// CHECK17: omp_offload.failed15: +// CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l153(i64 [[TMP30]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] +// CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT16]] +// CHECK17: omp_offload.cont16: +// CHECK17-NEXT: [[TMP55:%.*]] = load i32, i32* [[M]], align 4 +// CHECK17-NEXT: [[CONV17:%.*]] = bitcast i64* [[M_CASTED]] to i32* +// CHECK17-NEXT: store i32 [[TMP55]], i32* [[CONV17]], align 4 +// CHECK17-NEXT: [[TMP56:%.*]] = load i64, i64* [[M_CASTED]], align 8 // CHECK17-NEXT: [[TMP57:%.*]] = load i32, i32* [[N]], align 4 -// CHECK17-NEXT: store i32 [[TMP57]], i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK17-NEXT: [[TMP58:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK17-NEXT: [[SUB12:%.*]] = sub nsw i32 [[TMP58]], 0 -// CHECK17-NEXT: [[DIV13:%.*]] = sdiv i32 [[SUB12]], 1 -// CHECK17-NEXT: [[SUB14:%.*]] = sub nsw i32 [[DIV13]], 1 -// CHECK17-NEXT: store i32 [[SUB14]], i32* [[DOTCAPTURE_EXPR_11]], align 4 -// CHECK17-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_11]], align 4 -// CHECK17-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP59]], 1 -// CHECK17-NEXT: [[TMP60:%.*]] = zext i32 [[ADD15]] to i64 -// CHECK17-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP60]]) -// CHECK17-NEXT: [[TMP61:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l153.region_id, i32 3, i8** [[TMP54]], i8** [[TMP55]], i64* [[TMP56]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK17-NEXT: 
[[TMP62:%.*]] = icmp ne i32 [[TMP61]], 0 -// CHECK17-NEXT: br i1 [[TMP62]], label [[OMP_OFFLOAD_FAILED16:%.*]], label [[OMP_OFFLOAD_CONT17:%.*]] -// CHECK17: omp_offload.failed16: -// CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l153(i64 [[TMP34]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] -// CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT17]] -// CHECK17: omp_offload.cont17: -// CHECK17-NEXT: [[TMP63:%.*]] = load i32, i32* [[M]], align 4 -// CHECK17-NEXT: [[CONV18:%.*]] = bitcast i64* [[M_CASTED]] to i32* -// CHECK17-NEXT: store i32 [[TMP63]], i32* [[CONV18]], align 4 -// CHECK17-NEXT: [[TMP64:%.*]] = load i64, i64* [[M_CASTED]], align 8 -// CHECK17-NEXT: [[TMP65:%.*]] = load i32, i32* [[N]], align 4 -// CHECK17-NEXT: [[CONV20:%.*]] = bitcast i64* [[N_CASTED19]] to i32* -// CHECK17-NEXT: store i32 [[TMP65]], i32* [[CONV20]], align 4 -// CHECK17-NEXT: [[TMP66:%.*]] = load i64, i64* [[N_CASTED19]], align 8 -// CHECK17-NEXT: [[TMP67:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK17-NEXT: [[TMP68:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP69:%.*]] = bitcast i8** [[TMP68]] to i64* -// CHECK17-NEXT: store i64 [[TMP64]], i64* [[TMP69]], align 8 -// CHECK17-NEXT: [[TMP70:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0 +// CHECK17-NEXT: [[CONV19:%.*]] = bitcast i64* [[N_CASTED18]] to i32* +// CHECK17-NEXT: store i32 [[TMP57]], i32* [[CONV19]], align 4 +// CHECK17-NEXT: [[TMP58:%.*]] = load i64, i64* [[N_CASTED18]], align 8 +// CHECK17-NEXT: [[TMP59:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK17-NEXT: [[TMP60:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP61:%.*]] = bitcast i8** [[TMP60]] to i64* +// CHECK17-NEXT: store i64 [[TMP56]], i64* [[TMP61]], align 8 +// CHECK17-NEXT: [[TMP62:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP63:%.*]] = bitcast i8** [[TMP62]] to i64* +// CHECK17-NEXT: store i64 [[TMP56]], i64* [[TMP63]], align 8 +// CHECK17-NEXT: [[TMP64:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 0 +// CHECK17-NEXT: store i8* null, i8** [[TMP64]], align 8 +// CHECK17-NEXT: [[TMP65:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 1 +// CHECK17-NEXT: [[TMP66:%.*]] = bitcast i8** [[TMP65]] to i64* +// CHECK17-NEXT: store i64 [[TMP58]], i64* [[TMP66]], align 8 +// CHECK17-NEXT: [[TMP67:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 1 +// CHECK17-NEXT: [[TMP68:%.*]] = bitcast i8** [[TMP67]] to i64* +// CHECK17-NEXT: store i64 [[TMP58]], i64* [[TMP68]], align 8 +// CHECK17-NEXT: [[TMP69:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 1 +// CHECK17-NEXT: store i8* null, i8** [[TMP69]], align 8 +// CHECK17-NEXT: [[TMP70:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 2 // CHECK17-NEXT: [[TMP71:%.*]] = bitcast i8** [[TMP70]] to i64* -// CHECK17-NEXT: store i64 [[TMP64]], i64* [[TMP71]], align 8 -// CHECK17-NEXT: [[TMP72:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES24]], i32 0, i32 0 -// CHECK17-NEXT: store i64 4, i64* [[TMP72]], align 8 -// CHECK17-NEXT: [[TMP73:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 0 -// CHECK17-NEXT: store i8* null, i8** [[TMP73]], align 8 -// 
CHECK17-NEXT: [[TMP74:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 1 -// CHECK17-NEXT: [[TMP75:%.*]] = bitcast i8** [[TMP74]] to i64* -// CHECK17-NEXT: store i64 [[TMP66]], i64* [[TMP75]], align 8 -// CHECK17-NEXT: [[TMP76:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 1 -// CHECK17-NEXT: [[TMP77:%.*]] = bitcast i8** [[TMP76]] to i64* -// CHECK17-NEXT: store i64 [[TMP66]], i64* [[TMP77]], align 8 -// CHECK17-NEXT: [[TMP78:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES24]], i32 0, i32 1 -// CHECK17-NEXT: store i64 4, i64* [[TMP78]], align 8 -// CHECK17-NEXT: [[TMP79:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 1 +// CHECK17-NEXT: store i64 [[TMP1]], i64* [[TMP71]], align 8 +// CHECK17-NEXT: [[TMP72:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 2 +// CHECK17-NEXT: [[TMP73:%.*]] = bitcast i8** [[TMP72]] to i64* +// CHECK17-NEXT: store i64 [[TMP1]], i64* [[TMP73]], align 8 +// CHECK17-NEXT: [[TMP74:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 2 +// CHECK17-NEXT: store i8* null, i8** [[TMP74]], align 8 +// CHECK17-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 3 +// CHECK17-NEXT: [[TMP76:%.*]] = bitcast i8** [[TMP75]] to i32** +// CHECK17-NEXT: store i32* [[VLA]], i32** [[TMP76]], align 8 +// CHECK17-NEXT: [[TMP77:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 3 +// CHECK17-NEXT: [[TMP78:%.*]] = bitcast i8** [[TMP77]] to i32** +// CHECK17-NEXT: store i32* [[VLA]], i32** [[TMP78]], align 8 +// CHECK17-NEXT: store i64 [[TMP59]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 3), align 8 +// CHECK17-NEXT: [[TMP79:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 3 // CHECK17-NEXT: store i8* null, i8** [[TMP79]], align 8 -// CHECK17-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 2 -// CHECK17-NEXT: [[TMP81:%.*]] = bitcast i8** [[TMP80]] to i64* -// CHECK17-NEXT: store i64 [[TMP1]], i64* [[TMP81]], align 8 -// CHECK17-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 2 -// CHECK17-NEXT: [[TMP83:%.*]] = bitcast i8** [[TMP82]] to i64* -// CHECK17-NEXT: store i64 [[TMP1]], i64* [[TMP83]], align 8 -// CHECK17-NEXT: [[TMP84:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES24]], i32 0, i32 2 -// CHECK17-NEXT: store i64 8, i64* [[TMP84]], align 8 -// CHECK17-NEXT: [[TMP85:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 2 -// CHECK17-NEXT: store i8* null, i8** [[TMP85]], align 8 -// CHECK17-NEXT: [[TMP86:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 3 -// CHECK17-NEXT: [[TMP87:%.*]] = bitcast i8** [[TMP86]] to i32** -// CHECK17-NEXT: store i32* [[VLA]], i32** [[TMP87]], align 8 -// CHECK17-NEXT: [[TMP88:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 3 -// CHECK17-NEXT: [[TMP89:%.*]] = bitcast i8** [[TMP88]] to i32** -// CHECK17-NEXT: store i32* [[VLA]], i32** [[TMP89]], align 8 -// CHECK17-NEXT: [[TMP90:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES24]], i32 0, i32 3 -// CHECK17-NEXT: store i64 [[TMP67]], i64* [[TMP90]], 
align 8 -// CHECK17-NEXT: [[TMP91:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 3 -// CHECK17-NEXT: store i8* null, i8** [[TMP91]], align 8 -// CHECK17-NEXT: [[TMP92:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP93:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP94:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES24]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP95:%.*]] = load i32, i32* [[N]], align 4 -// CHECK17-NEXT: store i32 [[TMP95]], i32* [[DOTCAPTURE_EXPR_26]], align 4 -// CHECK17-NEXT: [[TMP96:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_26]], align 4 -// CHECK17-NEXT: [[SUB28:%.*]] = sub nsw i32 [[TMP96]], 0 -// CHECK17-NEXT: [[DIV29:%.*]] = sdiv i32 [[SUB28]], 1 -// CHECK17-NEXT: [[SUB30:%.*]] = sub nsw i32 [[DIV29]], 1 -// CHECK17-NEXT: store i32 [[SUB30]], i32* [[DOTCAPTURE_EXPR_27]], align 4 -// CHECK17-NEXT: [[TMP97:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_27]], align 4 -// CHECK17-NEXT: [[ADD31:%.*]] = add nsw i32 [[TMP97]], 1 -// CHECK17-NEXT: [[TMP98:%.*]] = zext i32 [[ADD31]] to i64 -// CHECK17-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP98]]) -// CHECK17-NEXT: [[TMP99:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l158.region_id, i32 4, i8** [[TMP92]], i8** [[TMP93]], i64* [[TMP94]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.7, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK17-NEXT: [[TMP100:%.*]] = icmp ne i32 [[TMP99]], 0 -// CHECK17-NEXT: br i1 [[TMP100]], label [[OMP_OFFLOAD_FAILED32:%.*]], label [[OMP_OFFLOAD_CONT33:%.*]] -// CHECK17: omp_offload.failed32: -// CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l158(i64 [[TMP64]], i64 [[TMP66]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] -// CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT33]] -// CHECK17: omp_offload.cont33: -// CHECK17-NEXT: [[TMP101:%.*]] = load i32, i32* [[N]], align 4 -// CHECK17-NEXT: [[CONV35:%.*]] = bitcast i64* [[N_CASTED34]] to i32* -// CHECK17-NEXT: store i32 [[TMP101]], i32* [[CONV35]], align 4 -// CHECK17-NEXT: [[TMP102:%.*]] = load i64, i64* [[N_CASTED34]], align 8 -// CHECK17-NEXT: [[TMP103:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK17-NEXT: [[TMP104:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS36]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP105:%.*]] = bitcast i8** [[TMP104]] to i64* -// CHECK17-NEXT: store i64 [[TMP102]], i64* [[TMP105]], align 8 -// CHECK17-NEXT: [[TMP106:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS37]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP107:%.*]] = bitcast i8** [[TMP106]] to i64* -// CHECK17-NEXT: store i64 [[TMP102]], i64* [[TMP107]], align 8 -// CHECK17-NEXT: [[TMP108:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES39]], i32 0, i32 0 -// CHECK17-NEXT: store i64 4, i64* [[TMP108]], align 8 -// CHECK17-NEXT: [[TMP109:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS38]], i64 0, i64 0 -// CHECK17-NEXT: store i8* null, i8** [[TMP109]], align 8 -// CHECK17-NEXT: [[TMP110:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS36]], i32 0, i32 1 -// CHECK17-NEXT: [[TMP111:%.*]] = bitcast i8** [[TMP110]] to i64* -// CHECK17-NEXT: store i64 [[TMP1]], i64* [[TMP111]], align 8 -// 
CHECK17-NEXT: [[TMP112:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS37]], i32 0, i32 1 -// CHECK17-NEXT: [[TMP113:%.*]] = bitcast i8** [[TMP112]] to i64* -// CHECK17-NEXT: store i64 [[TMP1]], i64* [[TMP113]], align 8 -// CHECK17-NEXT: [[TMP114:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES39]], i32 0, i32 1 -// CHECK17-NEXT: store i64 8, i64* [[TMP114]], align 8 -// CHECK17-NEXT: [[TMP115:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS38]], i64 0, i64 1 -// CHECK17-NEXT: store i8* null, i8** [[TMP115]], align 8 -// CHECK17-NEXT: [[TMP116:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS36]], i32 0, i32 2 -// CHECK17-NEXT: [[TMP117:%.*]] = bitcast i8** [[TMP116]] to i32** -// CHECK17-NEXT: store i32* [[VLA]], i32** [[TMP117]], align 8 -// CHECK17-NEXT: [[TMP118:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS37]], i32 0, i32 2 -// CHECK17-NEXT: [[TMP119:%.*]] = bitcast i8** [[TMP118]] to i32** -// CHECK17-NEXT: store i32* [[VLA]], i32** [[TMP119]], align 8 -// CHECK17-NEXT: [[TMP120:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES39]], i32 0, i32 2 -// CHECK17-NEXT: store i64 [[TMP103]], i64* [[TMP120]], align 8 -// CHECK17-NEXT: [[TMP121:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS38]], i64 0, i64 2 -// CHECK17-NEXT: store i8* null, i8** [[TMP121]], align 8 -// CHECK17-NEXT: [[TMP122:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS36]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP123:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS37]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP124:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES39]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP125:%.*]] = load i32, i32* [[N]], align 4 -// CHECK17-NEXT: store i32 [[TMP125]], i32* [[DOTCAPTURE_EXPR_41]], align 4 -// CHECK17-NEXT: [[TMP126:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_41]], align 4 -// CHECK17-NEXT: [[SUB43:%.*]] = sub nsw i32 [[TMP126]], 0 -// CHECK17-NEXT: [[DIV44:%.*]] = sdiv i32 [[SUB43]], 1 -// CHECK17-NEXT: [[SUB45:%.*]] = sub nsw i32 [[DIV44]], 1 -// CHECK17-NEXT: store i32 [[SUB45]], i32* [[DOTCAPTURE_EXPR_42]], align 4 -// CHECK17-NEXT: [[TMP127:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_42]], align 4 -// CHECK17-NEXT: [[ADD46:%.*]] = add nsw i32 [[TMP127]], 1 -// CHECK17-NEXT: [[TMP128:%.*]] = zext i32 [[ADD46]] to i64 -// CHECK17-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP128]]) -// CHECK17-NEXT: [[TMP129:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l163.region_id, i32 3, i8** [[TMP122]], i8** [[TMP123]], i64* [[TMP124]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK17-NEXT: [[TMP130:%.*]] = icmp ne i32 [[TMP129]], 0 -// CHECK17-NEXT: br i1 [[TMP130]], label [[OMP_OFFLOAD_FAILED47:%.*]], label [[OMP_OFFLOAD_CONT48:%.*]] -// CHECK17: omp_offload.failed47: -// CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l163(i64 [[TMP102]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] -// CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT48]] -// CHECK17: omp_offload.cont48: -// CHECK17-NEXT: [[TMP131:%.*]] = load i32, i32* [[M]], align 4 -// CHECK17-NEXT: [[CONV50:%.*]] = bitcast i64* [[M_CASTED49]] to i32* -// CHECK17-NEXT: store i32 
[[TMP131]], i32* [[CONV50]], align 4 -// CHECK17-NEXT: [[TMP132:%.*]] = load i64, i64* [[M_CASTED49]], align 8 -// CHECK17-NEXT: [[TMP133:%.*]] = load i32, i32* [[N]], align 4 -// CHECK17-NEXT: [[CONV52:%.*]] = bitcast i64* [[N_CASTED51]] to i32* -// CHECK17-NEXT: store i32 [[TMP133]], i32* [[CONV52]], align 4 -// CHECK17-NEXT: [[TMP134:%.*]] = load i64, i64* [[N_CASTED51]], align 8 -// CHECK17-NEXT: [[TMP135:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK17-NEXT: [[TMP136:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS53]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP137:%.*]] = bitcast i8** [[TMP136]] to i64* -// CHECK17-NEXT: store i64 [[TMP132]], i64* [[TMP137]], align 8 -// CHECK17-NEXT: [[TMP138:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS54]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP139:%.*]] = bitcast i8** [[TMP138]] to i64* -// CHECK17-NEXT: store i64 [[TMP132]], i64* [[TMP139]], align 8 -// CHECK17-NEXT: [[TMP140:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES56]], i32 0, i32 0 -// CHECK17-NEXT: store i64 4, i64* [[TMP140]], align 8 -// CHECK17-NEXT: [[TMP141:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS55]], i64 0, i64 0 -// CHECK17-NEXT: store i8* null, i8** [[TMP141]], align 8 -// CHECK17-NEXT: [[TMP142:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS53]], i32 0, i32 1 -// CHECK17-NEXT: [[TMP143:%.*]] = bitcast i8** [[TMP142]] to i64* -// CHECK17-NEXT: store i64 [[TMP134]], i64* [[TMP143]], align 8 -// CHECK17-NEXT: [[TMP144:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS54]], i32 0, i32 1 -// CHECK17-NEXT: [[TMP145:%.*]] = bitcast i8** [[TMP144]] to i64* -// CHECK17-NEXT: store i64 [[TMP134]], i64* [[TMP145]], align 8 -// CHECK17-NEXT: [[TMP146:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES56]], i32 0, i32 1 -// CHECK17-NEXT: store i64 4, i64* [[TMP146]], align 8 -// CHECK17-NEXT: [[TMP147:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS55]], i64 0, i64 1 -// CHECK17-NEXT: store i8* null, i8** [[TMP147]], align 8 -// CHECK17-NEXT: [[TMP148:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS53]], i32 0, i32 2 -// CHECK17-NEXT: [[TMP149:%.*]] = bitcast i8** [[TMP148]] to i64* -// CHECK17-NEXT: store i64 [[TMP1]], i64* [[TMP149]], align 8 -// CHECK17-NEXT: [[TMP150:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS54]], i32 0, i32 2 -// CHECK17-NEXT: [[TMP151:%.*]] = bitcast i8** [[TMP150]] to i64* -// CHECK17-NEXT: store i64 [[TMP1]], i64* [[TMP151]], align 8 -// CHECK17-NEXT: [[TMP152:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES56]], i32 0, i32 2 -// CHECK17-NEXT: store i64 8, i64* [[TMP152]], align 8 -// CHECK17-NEXT: [[TMP153:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS55]], i64 0, i64 2 -// CHECK17-NEXT: store i8* null, i8** [[TMP153]], align 8 -// CHECK17-NEXT: [[TMP154:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS53]], i32 0, i32 3 -// CHECK17-NEXT: [[TMP155:%.*]] = bitcast i8** [[TMP154]] to i32** -// CHECK17-NEXT: store i32* [[VLA]], i32** [[TMP155]], align 8 -// CHECK17-NEXT: [[TMP156:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS54]], i32 0, i32 3 -// CHECK17-NEXT: [[TMP157:%.*]] = bitcast i8** [[TMP156]] to i32** -// CHECK17-NEXT: store i32* [[VLA]], i32** [[TMP157]], align 8 -// CHECK17-NEXT: [[TMP158:%.*]] = getelementptr inbounds [4 
x i64], [4 x i64]* [[DOTOFFLOAD_SIZES56]], i32 0, i32 3 -// CHECK17-NEXT: store i64 [[TMP135]], i64* [[TMP158]], align 8 -// CHECK17-NEXT: [[TMP159:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS55]], i64 0, i64 3 -// CHECK17-NEXT: store i8* null, i8** [[TMP159]], align 8 -// CHECK17-NEXT: [[TMP160:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS53]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP161:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS54]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP162:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES56]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP163:%.*]] = load i32, i32* [[N]], align 4 -// CHECK17-NEXT: store i32 [[TMP163]], i32* [[DOTCAPTURE_EXPR_58]], align 4 -// CHECK17-NEXT: [[TMP164:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_58]], align 4 -// CHECK17-NEXT: [[SUB60:%.*]] = sub nsw i32 [[TMP164]], 0 -// CHECK17-NEXT: [[DIV61:%.*]] = sdiv i32 [[SUB60]], 1 -// CHECK17-NEXT: [[SUB62:%.*]] = sub nsw i32 [[DIV61]], 1 -// CHECK17-NEXT: store i32 [[SUB62]], i32* [[DOTCAPTURE_EXPR_59]], align 4 -// CHECK17-NEXT: [[TMP165:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_59]], align 4 -// CHECK17-NEXT: [[ADD63:%.*]] = add nsw i32 [[TMP165]], 1 -// CHECK17-NEXT: [[TMP166:%.*]] = zext i32 [[ADD63]] to i64 -// CHECK17-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP166]]) -// CHECK17-NEXT: [[TMP167:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l168.region_id, i32 4, i8** [[TMP160]], i8** [[TMP161]], i64* [[TMP162]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK17-NEXT: [[TMP168:%.*]] = icmp ne i32 [[TMP167]], 0 -// CHECK17-NEXT: br i1 [[TMP168]], label [[OMP_OFFLOAD_FAILED64:%.*]], label [[OMP_OFFLOAD_CONT65:%.*]] -// CHECK17: omp_offload.failed64: -// CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l168(i64 [[TMP132]], i64 [[TMP134]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] -// CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT65]] -// CHECK17: omp_offload.cont65: -// CHECK17-NEXT: [[TMP169:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 -// CHECK17-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP169]]) +// CHECK17-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP82:%.*]] = load i32, i32* [[N]], align 4 +// CHECK17-NEXT: store i32 [[TMP82]], i32* [[DOTCAPTURE_EXPR_24]], align 4 +// CHECK17-NEXT: [[TMP83:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_24]], align 4 +// CHECK17-NEXT: [[SUB26:%.*]] = sub nsw i32 [[TMP83]], 0 +// CHECK17-NEXT: [[DIV27:%.*]] = sdiv i32 [[SUB26]], 1 +// CHECK17-NEXT: [[SUB28:%.*]] = sub nsw i32 [[DIV27]], 1 +// CHECK17-NEXT: store i32 [[SUB28]], i32* [[DOTCAPTURE_EXPR_25]], align 4 +// CHECK17-NEXT: [[TMP84:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_25]], align 4 +// CHECK17-NEXT: [[ADD29:%.*]] = add nsw i32 [[TMP84]], 1 +// CHECK17-NEXT: [[TMP85:%.*]] = zext i32 [[ADD29]] to i64 +// CHECK17-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP85]]) +// CHECK17-NEXT: [[TMP86:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, 
i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l158.region_id, i32 4, i8** [[TMP80]], i8** [[TMP81]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK17-NEXT: [[TMP87:%.*]] = icmp ne i32 [[TMP86]], 0 +// CHECK17-NEXT: br i1 [[TMP87]], label [[OMP_OFFLOAD_FAILED30:%.*]], label [[OMP_OFFLOAD_CONT31:%.*]] +// CHECK17: omp_offload.failed30: +// CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l158(i64 [[TMP56]], i64 [[TMP58]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] +// CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT31]] +// CHECK17: omp_offload.cont31: +// CHECK17-NEXT: [[TMP88:%.*]] = load i32, i32* [[N]], align 4 +// CHECK17-NEXT: [[CONV33:%.*]] = bitcast i64* [[N_CASTED32]] to i32* +// CHECK17-NEXT: store i32 [[TMP88]], i32* [[CONV33]], align 4 +// CHECK17-NEXT: [[TMP89:%.*]] = load i64, i64* [[N_CASTED32]], align 8 +// CHECK17-NEXT: [[TMP90:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK17-NEXT: [[TMP91:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP92:%.*]] = bitcast i8** [[TMP91]] to i64* +// CHECK17-NEXT: store i64 [[TMP89]], i64* [[TMP92]], align 8 +// CHECK17-NEXT: [[TMP93:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS35]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP94:%.*]] = bitcast i8** [[TMP93]] to i64* +// CHECK17-NEXT: store i64 [[TMP89]], i64* [[TMP94]], align 8 +// CHECK17-NEXT: [[TMP95:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS36]], i64 0, i64 0 +// CHECK17-NEXT: store i8* null, i8** [[TMP95]], align 8 +// CHECK17-NEXT: [[TMP96:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 1 +// CHECK17-NEXT: [[TMP97:%.*]] = bitcast i8** [[TMP96]] to i64* +// CHECK17-NEXT: store i64 [[TMP1]], i64* [[TMP97]], align 8 +// CHECK17-NEXT: [[TMP98:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS35]], i32 0, i32 1 +// CHECK17-NEXT: [[TMP99:%.*]] = bitcast i8** [[TMP98]] to i64* +// CHECK17-NEXT: store i64 [[TMP1]], i64* [[TMP99]], align 8 +// CHECK17-NEXT: [[TMP100:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS36]], i64 0, i64 1 +// CHECK17-NEXT: store i8* null, i8** [[TMP100]], align 8 +// CHECK17-NEXT: [[TMP101:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 2 +// CHECK17-NEXT: [[TMP102:%.*]] = bitcast i8** [[TMP101]] to i32** +// CHECK17-NEXT: store i32* [[VLA]], i32** [[TMP102]], align 8 +// CHECK17-NEXT: [[TMP103:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS35]], i32 0, i32 2 +// CHECK17-NEXT: [[TMP104:%.*]] = bitcast i8** [[TMP103]] to i32** +// CHECK17-NEXT: store i32* [[VLA]], i32** [[TMP104]], align 8 +// CHECK17-NEXT: store i64 [[TMP90]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.12, i32 0, i32 2), align 8 +// CHECK17-NEXT: [[TMP105:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS36]], i64 0, i64 2 +// CHECK17-NEXT: store i8* null, i8** [[TMP105]], align 8 +// CHECK17-NEXT: [[TMP106:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP107:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS35]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP108:%.*]] = load i32, i32* [[N]], align 4 +// CHECK17-NEXT: store i32 
[[TMP108]], i32* [[DOTCAPTURE_EXPR_38]], align 4 +// CHECK17-NEXT: [[TMP109:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_38]], align 4 +// CHECK17-NEXT: [[SUB40:%.*]] = sub nsw i32 [[TMP109]], 0 +// CHECK17-NEXT: [[DIV41:%.*]] = sdiv i32 [[SUB40]], 1 +// CHECK17-NEXT: [[SUB42:%.*]] = sub nsw i32 [[DIV41]], 1 +// CHECK17-NEXT: store i32 [[SUB42]], i32* [[DOTCAPTURE_EXPR_39]], align 4 +// CHECK17-NEXT: [[TMP110:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_39]], align 4 +// CHECK17-NEXT: [[ADD43:%.*]] = add nsw i32 [[TMP110]], 1 +// CHECK17-NEXT: [[TMP111:%.*]] = zext i32 [[ADD43]] to i64 +// CHECK17-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP111]]) +// CHECK17-NEXT: [[TMP112:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l163.region_id, i32 3, i8** [[TMP106]], i8** [[TMP107]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK17-NEXT: [[TMP113:%.*]] = icmp ne i32 [[TMP112]], 0 +// CHECK17-NEXT: br i1 [[TMP113]], label [[OMP_OFFLOAD_FAILED44:%.*]], label [[OMP_OFFLOAD_CONT45:%.*]] +// CHECK17: omp_offload.failed44: +// CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l163(i64 [[TMP89]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] +// CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT45]] +// CHECK17: omp_offload.cont45: +// CHECK17-NEXT: [[TMP114:%.*]] = load i32, i32* [[M]], align 4 +// CHECK17-NEXT: [[CONV47:%.*]] = bitcast i64* [[M_CASTED46]] to i32* +// CHECK17-NEXT: store i32 [[TMP114]], i32* [[CONV47]], align 4 +// CHECK17-NEXT: [[TMP115:%.*]] = load i64, i64* [[M_CASTED46]], align 8 +// CHECK17-NEXT: [[TMP116:%.*]] = load i32, i32* [[N]], align 4 +// CHECK17-NEXT: [[CONV49:%.*]] = bitcast i64* [[N_CASTED48]] to i32* +// CHECK17-NEXT: store i32 [[TMP116]], i32* [[CONV49]], align 4 +// CHECK17-NEXT: [[TMP117:%.*]] = load i64, i64* [[N_CASTED48]], align 8 +// CHECK17-NEXT: [[TMP118:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK17-NEXT: [[TMP119:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP120:%.*]] = bitcast i8** [[TMP119]] to i64* +// CHECK17-NEXT: store i64 [[TMP115]], i64* [[TMP120]], align 8 +// CHECK17-NEXT: [[TMP121:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP122:%.*]] = bitcast i8** [[TMP121]] to i64* +// CHECK17-NEXT: store i64 [[TMP115]], i64* [[TMP122]], align 8 +// CHECK17-NEXT: [[TMP123:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 0 +// CHECK17-NEXT: store i8* null, i8** [[TMP123]], align 8 +// CHECK17-NEXT: [[TMP124:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 1 +// CHECK17-NEXT: [[TMP125:%.*]] = bitcast i8** [[TMP124]] to i64* +// CHECK17-NEXT: store i64 [[TMP117]], i64* [[TMP125]], align 8 +// CHECK17-NEXT: [[TMP126:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 1 +// CHECK17-NEXT: [[TMP127:%.*]] = bitcast i8** [[TMP126]] to i64* +// CHECK17-NEXT: store i64 [[TMP117]], i64* [[TMP127]], align 8 +// CHECK17-NEXT: [[TMP128:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 1 +// CHECK17-NEXT: store i8* null, i8** [[TMP128]], align 8 +// CHECK17-NEXT: [[TMP129:%.*]] = 
getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 2 +// CHECK17-NEXT: [[TMP130:%.*]] = bitcast i8** [[TMP129]] to i64* +// CHECK17-NEXT: store i64 [[TMP1]], i64* [[TMP130]], align 8 +// CHECK17-NEXT: [[TMP131:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 2 +// CHECK17-NEXT: [[TMP132:%.*]] = bitcast i8** [[TMP131]] to i64* +// CHECK17-NEXT: store i64 [[TMP1]], i64* [[TMP132]], align 8 +// CHECK17-NEXT: [[TMP133:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 2 +// CHECK17-NEXT: store i8* null, i8** [[TMP133]], align 8 +// CHECK17-NEXT: [[TMP134:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 3 +// CHECK17-NEXT: [[TMP135:%.*]] = bitcast i8** [[TMP134]] to i32** +// CHECK17-NEXT: store i32* [[VLA]], i32** [[TMP135]], align 8 +// CHECK17-NEXT: [[TMP136:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 3 +// CHECK17-NEXT: [[TMP137:%.*]] = bitcast i8** [[TMP136]] to i32** +// CHECK17-NEXT: store i32* [[VLA]], i32** [[TMP137]], align 8 +// CHECK17-NEXT: store i64 [[TMP118]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.16, i32 0, i32 3), align 8 +// CHECK17-NEXT: [[TMP138:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 3 +// CHECK17-NEXT: store i8* null, i8** [[TMP138]], align 8 +// CHECK17-NEXT: [[TMP139:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP140:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP141:%.*]] = load i32, i32* [[N]], align 4 +// CHECK17-NEXT: store i32 [[TMP141]], i32* [[DOTCAPTURE_EXPR_54]], align 4 +// CHECK17-NEXT: [[TMP142:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_54]], align 4 +// CHECK17-NEXT: [[SUB56:%.*]] = sub nsw i32 [[TMP142]], 0 +// CHECK17-NEXT: [[DIV57:%.*]] = sdiv i32 [[SUB56]], 1 +// CHECK17-NEXT: [[SUB58:%.*]] = sub nsw i32 [[DIV57]], 1 +// CHECK17-NEXT: store i32 [[SUB58]], i32* [[DOTCAPTURE_EXPR_55]], align 4 +// CHECK17-NEXT: [[TMP143:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_55]], align 4 +// CHECK17-NEXT: [[ADD59:%.*]] = add nsw i32 [[TMP143]], 1 +// CHECK17-NEXT: [[TMP144:%.*]] = zext i32 [[ADD59]] to i64 +// CHECK17-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP144]]) +// CHECK17-NEXT: [[TMP145:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l168.region_id, i32 4, i8** [[TMP139]], i8** [[TMP140]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.16, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK17-NEXT: [[TMP146:%.*]] = icmp ne i32 [[TMP145]], 0 +// CHECK17-NEXT: br i1 [[TMP146]], label [[OMP_OFFLOAD_FAILED60:%.*]], label [[OMP_OFFLOAD_CONT61:%.*]] +// CHECK17: omp_offload.failed60: +// CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l168(i64 [[TMP115]], i64 [[TMP117]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] +// CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT61]] +// CHECK17: omp_offload.cont61: +// CHECK17-NEXT: [[TMP147:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 +// CHECK17-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP147]]) // CHECK17-NEXT: store 
i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK17-NEXT: [[TMP170:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK17-NEXT: call void @llvm.stackrestore(i8* [[TMP170]]) -// CHECK17-NEXT: [[TMP171:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK17-NEXT: ret i32 [[TMP171]] +// CHECK17-NEXT: [[TMP148:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK17-NEXT: call void @llvm.stackrestore(i8* [[TMP148]]) +// CHECK17-NEXT: [[TMP149:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK17-NEXT: ret i32 [[TMP149]] // // // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l148 @@ -17951,11 +17756,11 @@ // CHECK17-NEXT: [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK17-NEXT: store i32 [[TMP3]], i32* [[CONV2]], align 4 // CHECK17-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i64, i32*, i64)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i32* [[CONV1]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP4]]) +// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i64, i32*, i64)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32* [[CONV1]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP4]]) // CHECK17-NEXT: ret void // // -// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..5 +// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..6 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -18037,7 +17842,7 @@ // CHECK17-NEXT: [[CONV7:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK17-NEXT: store i32 [[TMP21]], i32* [[CONV7]], align 4 // CHECK17-NEXT: [[TMP22:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64, i32*, i64)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], i64 [[TMP1]], i32* [[TMP2]], i64 [[TMP22]]) +// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64, i32*, i64)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], i64 [[TMP1]], i32* [[TMP2]], i64 [[TMP22]]) // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK17: omp.inner.for.inc: // CHECK17-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -18079,7 +17884,7 @@ // CHECK17-NEXT: ret void // // -// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..6 +// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..7 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -18199,11 +18004,11 @@ // CHECK17-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32* // CHECK17-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8 // CHECK17-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8 -// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i64, i32*)* @.omp_outlined..8 to void (i32*, i32*, ...)*), i32* [[CONV]], i64 [[TMP0]], i32* [[TMP1]]) +// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i64, i32*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), i32* [[CONV]], i64 [[TMP0]], i32* [[TMP1]]) // CHECK17-NEXT: ret void // // -// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..8 +// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..10 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -18275,7 +18080,7 @@ // CHECK17-NEXT: [[TMP17:%.*]] = zext i32 [[TMP16]] to i64 // CHECK17-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 // CHECK17-NEXT: [[TMP19:%.*]] = zext i32 [[TMP18]] to i64 -// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64, i32*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), i64 [[TMP17]], i64 [[TMP19]], i32* [[TMP0]], i64 [[TMP1]], i32* [[TMP2]]) +// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64, i32*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP17]], i64 [[TMP19]], i32* [[TMP0]], i64 [[TMP1]], i32* [[TMP2]]) // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK17: omp.inner.for.inc: // CHECK17-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -18294,7 +18099,7 @@ // CHECK17-NEXT: ret void // // -// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..9 +// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..11 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -18418,11 +18223,11 @@ // CHECK17-NEXT: [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK17-NEXT: store i32 [[TMP3]], i32* [[CONV2]], align 4 // CHECK17-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i64, i32*, i64)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32* [[CONV1]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP4]]) +// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i64, i32*, i64)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i32* [[CONV1]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP4]]) // CHECK17-NEXT: ret void // // -// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..14 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -18502,7 +18307,7 @@ // CHECK17-NEXT: [[CONV7:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK17-NEXT: store i32 [[TMP20]], i32* [[CONV7]], align 4 // CHECK17-NEXT: [[TMP21:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64, i32*, i64)* @.omp_outlined..12 to void (i32*, i32*, ...)*), i64 [[TMP17]], i64 [[TMP19]], i32* [[TMP0]], i64 [[TMP1]], i32* [[TMP2]], i64 [[TMP21]]) +// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64, i32*, i64)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i64 [[TMP17]], i64 [[TMP19]], i32* [[TMP0]], i64 [[TMP1]], i32* [[TMP2]], i64 [[TMP21]]) // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK17: omp.inner.for.inc: // CHECK17-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -18521,7 +18326,7 @@ // CHECK17-NEXT: ret void // // -// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..12 +// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..15 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -18667,7 +18472,7 @@ // CHECK17-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK17-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK17-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK17-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK17-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.20, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.21, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK17-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK17-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK17: omp_offload.failed: @@ -18685,7 +18490,7 @@ // CHECK17-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0 // CHECK17-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0 // CHECK17-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK17-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l121.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.19, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.20, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK17-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l121.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x 
i64]* @.offload_sizes.24, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.25, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK17-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 // CHECK17-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]] // CHECK17: omp_offload.failed5: @@ -18715,7 +18520,7 @@ // CHECK17-NEXT: [[TMP30:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0 // CHECK17-NEXT: [[TMP31:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0 // CHECK17-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK17-NEXT: [[TMP32:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l126.region_id, i32 2, i8** [[TMP30]], i8** [[TMP31]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.23, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.24, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK17-NEXT: [[TMP32:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l126.region_id, i32 2, i8** [[TMP30]], i8** [[TMP31]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.28, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.29, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK17-NEXT: [[TMP33:%.*]] = icmp ne i32 [[TMP32]], 0 // CHECK17-NEXT: br i1 [[TMP33]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]] // CHECK17: omp_offload.failed11: @@ -18733,7 +18538,7 @@ // CHECK17-NEXT: [[TMP39:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 0 // CHECK17-NEXT: [[TMP40:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 0 // CHECK17-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK17-NEXT: [[TMP41:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l131.region_id, i32 1, i8** [[TMP39]], i8** [[TMP40]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.27, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.28, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK17-NEXT: [[TMP41:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l131.region_id, i32 1, i8** [[TMP39]], i8** [[TMP40]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.32, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.33, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK17-NEXT: [[TMP42:%.*]] = icmp ne i32 [[TMP41]], 0 // CHECK17-NEXT: br i1 [[TMP42]], label [[OMP_OFFLOAD_FAILED17:%.*]], label [[OMP_OFFLOAD_CONT18:%.*]] // CHECK17: omp_offload.failed17: @@ -18763,7 +18568,7 @@ // CHECK17-NEXT: [[TMP55:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0 // CHECK17-NEXT: [[TMP56:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0 // CHECK17-NEXT: call void 
@__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK17-NEXT: [[TMP57:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l136.region_id, i32 2, i8** [[TMP55]], i8** [[TMP56]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.31, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.32, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK17-NEXT: [[TMP57:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l136.region_id, i32 2, i8** [[TMP55]], i8** [[TMP56]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.36, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.37, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK17-NEXT: [[TMP58:%.*]] = icmp ne i32 [[TMP57]], 0 // CHECK17-NEXT: br i1 [[TMP58]], label [[OMP_OFFLOAD_FAILED25:%.*]], label [[OMP_OFFLOAD_CONT26:%.*]] // CHECK17: omp_offload.failed25: @@ -18779,11 +18584,11 @@ // CHECK17-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK17-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 // CHECK17-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 -// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK17-NEXT: ret void // // -// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..14 +// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..18 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -18831,7 +18636,7 @@ // CHECK17-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 // CHECK17-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 // CHECK17-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]) +// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..19 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]) // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK17: omp.inner.for.inc: // CHECK17-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -18846,7 +18651,7 @@ // CHECK17-NEXT: ret void // // -// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..15 +// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..19 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -18929,11 +18734,11 @@ // CHECK17-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK17-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 // CHECK17-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 -// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..17 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..22 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK17-NEXT: ret void // // -// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..17 +// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..22 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -18981,7 +18786,7 @@ // CHECK17-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 // CHECK17-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 // CHECK17-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]) +// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..23 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]) // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK17: omp.inner.for.inc: // CHECK17-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -18996,7 +18801,7 @@ // CHECK17-NEXT: ret void // // -// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..18 +// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..23 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -19090,11 +18895,11 @@ // CHECK17-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK17-NEXT: store i32 [[TMP2]], i32* [[CONV1]], align 4 // CHECK17-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..21 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP3]]) +// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..26 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP3]]) // CHECK17-NEXT: ret void // // -// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..21 +// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..26 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -19150,7 +18955,7 @@ // CHECK17-NEXT: [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK17-NEXT: store i32 [[TMP12]], i32* [[CONV2]], align 4 // CHECK17-NEXT: [[TMP13:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..22 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]]) +// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..27 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]]) // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK17: omp.inner.for.inc: // CHECK17-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -19165,7 +18970,7 @@ // CHECK17-NEXT: ret void // // -// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..22 +// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..27 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -19273,11 +19078,11 @@ // CHECK17-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK17-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 // CHECK17-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 -// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..25 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..30 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK17-NEXT: ret void // // -// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..25 +// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..30 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -19325,7 +19130,7 @@ // CHECK17-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 // CHECK17-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 // CHECK17-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..26 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]) +// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..31 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]) // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK17: omp.inner.for.inc: // CHECK17-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -19340,7 +19145,7 @@ // CHECK17-NEXT: ret void // // -// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..26 +// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..31 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -19432,11 +19237,11 @@ // CHECK17-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK17-NEXT: store i32 [[TMP2]], i32* [[CONV1]], align 4 // CHECK17-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..29 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP3]]) +// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..34 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP3]]) // CHECK17-NEXT: ret void // // -// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..29 +// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..34 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -19492,7 +19297,7 @@ // CHECK17-NEXT: [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK17-NEXT: store i32 [[TMP12]], i32* [[CONV2]], align 4 // CHECK17-NEXT: [[TMP13:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..30 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]]) +// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..35 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]]) // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK17: omp.inner.for.inc: // CHECK17-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -19507,7 +19312,7 @@ // CHECK17-NEXT: ret void // // -// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..30 +// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..35 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -19607,7 +19412,6 @@ // CHECK18-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK18-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK18-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8 -// CHECK18-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 8 // CHECK18-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK18-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK18-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -19615,36 +19419,32 @@ // CHECK18-NEXT: [[DOTOFFLOAD_BASEPTRS5:%.*]] = alloca [3 x i8*], align 8 // CHECK18-NEXT: [[DOTOFFLOAD_PTRS6:%.*]] = alloca [3 x i8*], align 8 // CHECK18-NEXT: [[DOTOFFLOAD_MAPPERS7:%.*]] = alloca [3 x i8*], align 8 -// CHECK18-NEXT: [[DOTOFFLOAD_SIZES8:%.*]] = alloca [3 x i64], align 8 -// CHECK18-NEXT: [[_TMP9:%.*]] = alloca i32, align 4 +// CHECK18-NEXT: [[_TMP8:%.*]] = alloca i32, align 4 +// CHECK18-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4 // CHECK18-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4 -// CHECK18-NEXT: [[DOTCAPTURE_EXPR_11:%.*]] = alloca i32, align 4 // CHECK18-NEXT: [[M_CASTED:%.*]] = alloca i64, align 8 -// CHECK18-NEXT: [[N_CASTED19:%.*]] = alloca i64, align 8 -// CHECK18-NEXT: [[DOTOFFLOAD_BASEPTRS21:%.*]] = alloca [4 x i8*], align 8 -// CHECK18-NEXT: [[DOTOFFLOAD_PTRS22:%.*]] = alloca [4 x i8*], align 8 -// CHECK18-NEXT: [[DOTOFFLOAD_MAPPERS23:%.*]] = alloca [4 x i8*], align 8 -// CHECK18-NEXT: [[DOTOFFLOAD_SIZES24:%.*]] = alloca [4 x i64], align 8 -// CHECK18-NEXT: [[_TMP25:%.*]] = alloca i32, align 4 -// CHECK18-NEXT: [[DOTCAPTURE_EXPR_26:%.*]] = alloca i32, align 4 -// CHECK18-NEXT: [[DOTCAPTURE_EXPR_27:%.*]] = alloca i32, align 4 -// CHECK18-NEXT: [[N_CASTED34:%.*]] = alloca i64, align 8 -// CHECK18-NEXT: [[DOTOFFLOAD_BASEPTRS36:%.*]] = alloca [3 x i8*], align 8 -// CHECK18-NEXT: [[DOTOFFLOAD_PTRS37:%.*]] = alloca [3 x i8*], align 8 -// CHECK18-NEXT: [[DOTOFFLOAD_MAPPERS38:%.*]] = alloca [3 x i8*], align 8 -// CHECK18-NEXT: [[DOTOFFLOAD_SIZES39:%.*]] = alloca [3 x i64], align 8 -// CHECK18-NEXT: [[_TMP40:%.*]] = alloca i32, align 4 -// CHECK18-NEXT: [[DOTCAPTURE_EXPR_41:%.*]] = alloca i32, align 4 -// CHECK18-NEXT: [[DOTCAPTURE_EXPR_42:%.*]] = alloca i32, align 4 -// CHECK18-NEXT: [[M_CASTED49:%.*]] = alloca i64, align 8 -// CHECK18-NEXT: [[N_CASTED51:%.*]] = alloca i64, align 8 -// CHECK18-NEXT: [[DOTOFFLOAD_BASEPTRS53:%.*]] = alloca [4 x i8*], align 8 -// CHECK18-NEXT: [[DOTOFFLOAD_PTRS54:%.*]] = alloca [4 x i8*], align 8 -// CHECK18-NEXT: [[DOTOFFLOAD_MAPPERS55:%.*]] = alloca [4 x i8*], align 8 
-// CHECK18-NEXT: [[DOTOFFLOAD_SIZES56:%.*]] = alloca [4 x i64], align 8 -// CHECK18-NEXT: [[_TMP57:%.*]] = alloca i32, align 4 -// CHECK18-NEXT: [[DOTCAPTURE_EXPR_58:%.*]] = alloca i32, align 4 -// CHECK18-NEXT: [[DOTCAPTURE_EXPR_59:%.*]] = alloca i32, align 4 +// CHECK18-NEXT: [[N_CASTED18:%.*]] = alloca i64, align 8 +// CHECK18-NEXT: [[DOTOFFLOAD_BASEPTRS20:%.*]] = alloca [4 x i8*], align 8 +// CHECK18-NEXT: [[DOTOFFLOAD_PTRS21:%.*]] = alloca [4 x i8*], align 8 +// CHECK18-NEXT: [[DOTOFFLOAD_MAPPERS22:%.*]] = alloca [4 x i8*], align 8 +// CHECK18-NEXT: [[_TMP23:%.*]] = alloca i32, align 4 +// CHECK18-NEXT: [[DOTCAPTURE_EXPR_24:%.*]] = alloca i32, align 4 +// CHECK18-NEXT: [[DOTCAPTURE_EXPR_25:%.*]] = alloca i32, align 4 +// CHECK18-NEXT: [[N_CASTED32:%.*]] = alloca i64, align 8 +// CHECK18-NEXT: [[DOTOFFLOAD_BASEPTRS34:%.*]] = alloca [3 x i8*], align 8 +// CHECK18-NEXT: [[DOTOFFLOAD_PTRS35:%.*]] = alloca [3 x i8*], align 8 +// CHECK18-NEXT: [[DOTOFFLOAD_MAPPERS36:%.*]] = alloca [3 x i8*], align 8 +// CHECK18-NEXT: [[_TMP37:%.*]] = alloca i32, align 4 +// CHECK18-NEXT: [[DOTCAPTURE_EXPR_38:%.*]] = alloca i32, align 4 +// CHECK18-NEXT: [[DOTCAPTURE_EXPR_39:%.*]] = alloca i32, align 4 +// CHECK18-NEXT: [[M_CASTED46:%.*]] = alloca i64, align 8 +// CHECK18-NEXT: [[N_CASTED48:%.*]] = alloca i64, align 8 +// CHECK18-NEXT: [[DOTOFFLOAD_BASEPTRS50:%.*]] = alloca [4 x i8*], align 8 +// CHECK18-NEXT: [[DOTOFFLOAD_PTRS51:%.*]] = alloca [4 x i8*], align 8 +// CHECK18-NEXT: [[DOTOFFLOAD_MAPPERS52:%.*]] = alloca [4 x i8*], align 8 +// CHECK18-NEXT: [[_TMP53:%.*]] = alloca i32, align 4 +// CHECK18-NEXT: [[DOTCAPTURE_EXPR_54:%.*]] = alloca i32, align 4 +// CHECK18-NEXT: [[DOTCAPTURE_EXPR_55:%.*]] = alloca i32, align 4 // CHECK18-NEXT: store i32 0, i32* [[RETVAL]], align 4 // CHECK18-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 // CHECK18-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 8 @@ -19667,310 +19467,276 @@ // CHECK18-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK18-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i64* // CHECK18-NEXT: store i64 [[TMP4]], i64* [[TMP9]], align 8 -// CHECK18-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK18-NEXT: store i64 4, i64* [[TMP10]], align 8 -// CHECK18-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK18-NEXT: store i8* null, i8** [[TMP11]], align 8 -// CHECK18-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK18-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64* -// CHECK18-NEXT: store i64 [[TMP1]], i64* [[TMP13]], align 8 -// CHECK18-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK18-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64* -// CHECK18-NEXT: store i64 [[TMP1]], i64* [[TMP15]], align 8 -// CHECK18-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK18-NEXT: store i64 8, i64* [[TMP16]], align 8 -// CHECK18-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK18-NEXT: store i8* null, i8** [[TMP17]], align 8 -// CHECK18-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK18-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* 
[[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK18-NEXT: store i8* null, i8** [[TMP10]], align 8 +// CHECK18-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK18-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i64* +// CHECK18-NEXT: store i64 [[TMP1]], i64* [[TMP12]], align 8 +// CHECK18-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK18-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i64* +// CHECK18-NEXT: store i64 [[TMP1]], i64* [[TMP14]], align 8 +// CHECK18-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK18-NEXT: store i8* null, i8** [[TMP15]], align 8 +// CHECK18-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK18-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** +// CHECK18-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 8 +// CHECK18-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK18-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** // CHECK18-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 8 -// CHECK18-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK18-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** -// CHECK18-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 8 -// CHECK18-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK18-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 8 -// CHECK18-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK18-NEXT: store i8* null, i8** [[TMP23]], align 8 -// CHECK18-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4 -// CHECK18-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK18-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK18-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 +// CHECK18-NEXT: store i64 [[TMP5]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 2), align 8 +// CHECK18-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK18-NEXT: store i8* null, i8** [[TMP20]], align 8 +// CHECK18-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP23:%.*]] = load i32, i32* [[N]], align 4 +// CHECK18-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK18-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK18-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK18-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK18-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK18-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK18-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK18-NEXT: [[ADD:%.*]] = add nsw 
i32 [[TMP29]], 1 -// CHECK18-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 -// CHECK18-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP30]]) -// CHECK18-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l148.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK18-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 -// CHECK18-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK18-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK18-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], 1 +// CHECK18-NEXT: [[TMP26:%.*]] = zext i32 [[ADD]] to i64 +// CHECK18-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP26]]) +// CHECK18-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l148.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK18-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK18-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK18: omp_offload.failed: // CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l148(i64 [[TMP4]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK18-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK18: omp_offload.cont: -// CHECK18-NEXT: [[TMP33:%.*]] = load i32, i32* [[N]], align 4 +// CHECK18-NEXT: [[TMP29:%.*]] = load i32, i32* [[N]], align 4 // CHECK18-NEXT: [[CONV4:%.*]] = bitcast i64* [[N_CASTED3]] to i32* -// CHECK18-NEXT: store i32 [[TMP33]], i32* [[CONV4]], align 4 -// CHECK18-NEXT: [[TMP34:%.*]] = load i64, i64* [[N_CASTED3]], align 8 -// CHECK18-NEXT: [[TMP35:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK18-NEXT: [[TMP36:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i64* -// CHECK18-NEXT: store i64 [[TMP34]], i64* [[TMP37]], align 8 -// CHECK18-NEXT: [[TMP38:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i64* -// CHECK18-NEXT: store i64 [[TMP34]], i64* [[TMP39]], align 8 -// CHECK18-NEXT: [[TMP40:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 0 -// CHECK18-NEXT: store i64 4, i64* [[TMP40]], align 8 -// CHECK18-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 0 +// CHECK18-NEXT: store i32 [[TMP29]], i32* [[CONV4]], align 4 +// CHECK18-NEXT: [[TMP30:%.*]] = load i64, i64* [[N_CASTED3]], align 8 +// CHECK18-NEXT: [[TMP31:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK18-NEXT: [[TMP32:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i64* +// CHECK18-NEXT: store i64 [[TMP30]], i64* [[TMP33]], align 8 +// CHECK18-NEXT: [[TMP34:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* 
[[DOTOFFLOAD_PTRS6]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i64* +// CHECK18-NEXT: store i64 [[TMP30]], i64* [[TMP35]], align 8 +// CHECK18-NEXT: [[TMP36:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 0 +// CHECK18-NEXT: store i8* null, i8** [[TMP36]], align 8 +// CHECK18-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1 +// CHECK18-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i64* +// CHECK18-NEXT: store i64 [[TMP1]], i64* [[TMP38]], align 8 +// CHECK18-NEXT: [[TMP39:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 1 +// CHECK18-NEXT: [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i64* +// CHECK18-NEXT: store i64 [[TMP1]], i64* [[TMP40]], align 8 +// CHECK18-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 1 // CHECK18-NEXT: store i8* null, i8** [[TMP41]], align 8 -// CHECK18-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1 -// CHECK18-NEXT: [[TMP43:%.*]] = bitcast i8** [[TMP42]] to i64* -// CHECK18-NEXT: store i64 [[TMP1]], i64* [[TMP43]], align 8 -// CHECK18-NEXT: [[TMP44:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 1 -// CHECK18-NEXT: [[TMP45:%.*]] = bitcast i8** [[TMP44]] to i64* -// CHECK18-NEXT: store i64 [[TMP1]], i64* [[TMP45]], align 8 -// CHECK18-NEXT: [[TMP46:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 1 -// CHECK18-NEXT: store i64 8, i64* [[TMP46]], align 8 -// CHECK18-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 1 -// CHECK18-NEXT: store i8* null, i8** [[TMP47]], align 8 -// CHECK18-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 2 -// CHECK18-NEXT: [[TMP49:%.*]] = bitcast i8** [[TMP48]] to i32** -// CHECK18-NEXT: store i32* [[VLA]], i32** [[TMP49]], align 8 -// CHECK18-NEXT: [[TMP50:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 2 -// CHECK18-NEXT: [[TMP51:%.*]] = bitcast i8** [[TMP50]] to i32** -// CHECK18-NEXT: store i32* [[VLA]], i32** [[TMP51]], align 8 -// CHECK18-NEXT: [[TMP52:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 2 -// CHECK18-NEXT: store i64 [[TMP35]], i64* [[TMP52]], align 8 -// CHECK18-NEXT: [[TMP53:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 2 -// CHECK18-NEXT: store i8* null, i8** [[TMP53]], align 8 -// CHECK18-NEXT: [[TMP54:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP55:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP56:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 2 +// CHECK18-NEXT: [[TMP43:%.*]] = bitcast i8** [[TMP42]] to i32** +// CHECK18-NEXT: store i32* [[VLA]], i32** [[TMP43]], align 8 +// CHECK18-NEXT: [[TMP44:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 2 +// CHECK18-NEXT: [[TMP45:%.*]] = bitcast i8** [[TMP44]] to i32** +// CHECK18-NEXT: store i32* [[VLA]], i32** [[TMP45]], align 8 +// CHECK18-NEXT: store i64 [[TMP31]], i64* 
getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 2), align 8 +// CHECK18-NEXT: [[TMP46:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 2 +// CHECK18-NEXT: store i8* null, i8** [[TMP46]], align 8 +// CHECK18-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP49:%.*]] = load i32, i32* [[N]], align 4 +// CHECK18-NEXT: store i32 [[TMP49]], i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK18-NEXT: [[TMP50:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK18-NEXT: [[SUB11:%.*]] = sub nsw i32 [[TMP50]], 0 +// CHECK18-NEXT: [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1 +// CHECK18-NEXT: [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1 +// CHECK18-NEXT: store i32 [[SUB13]], i32* [[DOTCAPTURE_EXPR_10]], align 4 +// CHECK18-NEXT: [[TMP51:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 +// CHECK18-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP51]], 1 +// CHECK18-NEXT: [[TMP52:%.*]] = zext i32 [[ADD14]] to i64 +// CHECK18-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP52]]) +// CHECK18-NEXT: [[TMP53:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l153.region_id, i32 3, i8** [[TMP47]], i8** [[TMP48]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK18-NEXT: [[TMP54:%.*]] = icmp ne i32 [[TMP53]], 0 +// CHECK18-NEXT: br i1 [[TMP54]], label [[OMP_OFFLOAD_FAILED15:%.*]], label [[OMP_OFFLOAD_CONT16:%.*]] +// CHECK18: omp_offload.failed15: +// CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l153(i64 [[TMP30]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] +// CHECK18-NEXT: br label [[OMP_OFFLOAD_CONT16]] +// CHECK18: omp_offload.cont16: +// CHECK18-NEXT: [[TMP55:%.*]] = load i32, i32* [[M]], align 4 +// CHECK18-NEXT: [[CONV17:%.*]] = bitcast i64* [[M_CASTED]] to i32* +// CHECK18-NEXT: store i32 [[TMP55]], i32* [[CONV17]], align 4 +// CHECK18-NEXT: [[TMP56:%.*]] = load i64, i64* [[M_CASTED]], align 8 // CHECK18-NEXT: [[TMP57:%.*]] = load i32, i32* [[N]], align 4 -// CHECK18-NEXT: store i32 [[TMP57]], i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK18-NEXT: [[TMP58:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK18-NEXT: [[SUB12:%.*]] = sub nsw i32 [[TMP58]], 0 -// CHECK18-NEXT: [[DIV13:%.*]] = sdiv i32 [[SUB12]], 1 -// CHECK18-NEXT: [[SUB14:%.*]] = sub nsw i32 [[DIV13]], 1 -// CHECK18-NEXT: store i32 [[SUB14]], i32* [[DOTCAPTURE_EXPR_11]], align 4 -// CHECK18-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_11]], align 4 -// CHECK18-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP59]], 1 -// CHECK18-NEXT: [[TMP60:%.*]] = zext i32 [[ADD15]] to i64 -// CHECK18-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP60]]) -// CHECK18-NEXT: [[TMP61:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l153.region_id, i32 3, i8** [[TMP54]], i8** [[TMP55]], i64* [[TMP56]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK18-NEXT: 
[[TMP62:%.*]] = icmp ne i32 [[TMP61]], 0 -// CHECK18-NEXT: br i1 [[TMP62]], label [[OMP_OFFLOAD_FAILED16:%.*]], label [[OMP_OFFLOAD_CONT17:%.*]] -// CHECK18: omp_offload.failed16: -// CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l153(i64 [[TMP34]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] -// CHECK18-NEXT: br label [[OMP_OFFLOAD_CONT17]] -// CHECK18: omp_offload.cont17: -// CHECK18-NEXT: [[TMP63:%.*]] = load i32, i32* [[M]], align 4 -// CHECK18-NEXT: [[CONV18:%.*]] = bitcast i64* [[M_CASTED]] to i32* -// CHECK18-NEXT: store i32 [[TMP63]], i32* [[CONV18]], align 4 -// CHECK18-NEXT: [[TMP64:%.*]] = load i64, i64* [[M_CASTED]], align 8 -// CHECK18-NEXT: [[TMP65:%.*]] = load i32, i32* [[N]], align 4 -// CHECK18-NEXT: [[CONV20:%.*]] = bitcast i64* [[N_CASTED19]] to i32* -// CHECK18-NEXT: store i32 [[TMP65]], i32* [[CONV20]], align 4 -// CHECK18-NEXT: [[TMP66:%.*]] = load i64, i64* [[N_CASTED19]], align 8 -// CHECK18-NEXT: [[TMP67:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK18-NEXT: [[TMP68:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP69:%.*]] = bitcast i8** [[TMP68]] to i64* -// CHECK18-NEXT: store i64 [[TMP64]], i64* [[TMP69]], align 8 -// CHECK18-NEXT: [[TMP70:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0 +// CHECK18-NEXT: [[CONV19:%.*]] = bitcast i64* [[N_CASTED18]] to i32* +// CHECK18-NEXT: store i32 [[TMP57]], i32* [[CONV19]], align 4 +// CHECK18-NEXT: [[TMP58:%.*]] = load i64, i64* [[N_CASTED18]], align 8 +// CHECK18-NEXT: [[TMP59:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK18-NEXT: [[TMP60:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP61:%.*]] = bitcast i8** [[TMP60]] to i64* +// CHECK18-NEXT: store i64 [[TMP56]], i64* [[TMP61]], align 8 +// CHECK18-NEXT: [[TMP62:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP63:%.*]] = bitcast i8** [[TMP62]] to i64* +// CHECK18-NEXT: store i64 [[TMP56]], i64* [[TMP63]], align 8 +// CHECK18-NEXT: [[TMP64:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 0 +// CHECK18-NEXT: store i8* null, i8** [[TMP64]], align 8 +// CHECK18-NEXT: [[TMP65:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 1 +// CHECK18-NEXT: [[TMP66:%.*]] = bitcast i8** [[TMP65]] to i64* +// CHECK18-NEXT: store i64 [[TMP58]], i64* [[TMP66]], align 8 +// CHECK18-NEXT: [[TMP67:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 1 +// CHECK18-NEXT: [[TMP68:%.*]] = bitcast i8** [[TMP67]] to i64* +// CHECK18-NEXT: store i64 [[TMP58]], i64* [[TMP68]], align 8 +// CHECK18-NEXT: [[TMP69:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 1 +// CHECK18-NEXT: store i8* null, i8** [[TMP69]], align 8 +// CHECK18-NEXT: [[TMP70:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 2 // CHECK18-NEXT: [[TMP71:%.*]] = bitcast i8** [[TMP70]] to i64* -// CHECK18-NEXT: store i64 [[TMP64]], i64* [[TMP71]], align 8 -// CHECK18-NEXT: [[TMP72:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES24]], i32 0, i32 0 -// CHECK18-NEXT: store i64 4, i64* [[TMP72]], align 8 -// CHECK18-NEXT: [[TMP73:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 0 -// CHECK18-NEXT: store i8* null, i8** [[TMP73]], align 8 -// 
CHECK18-NEXT: [[TMP74:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 1 -// CHECK18-NEXT: [[TMP75:%.*]] = bitcast i8** [[TMP74]] to i64* -// CHECK18-NEXT: store i64 [[TMP66]], i64* [[TMP75]], align 8 -// CHECK18-NEXT: [[TMP76:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 1 -// CHECK18-NEXT: [[TMP77:%.*]] = bitcast i8** [[TMP76]] to i64* -// CHECK18-NEXT: store i64 [[TMP66]], i64* [[TMP77]], align 8 -// CHECK18-NEXT: [[TMP78:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES24]], i32 0, i32 1 -// CHECK18-NEXT: store i64 4, i64* [[TMP78]], align 8 -// CHECK18-NEXT: [[TMP79:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 1 +// CHECK18-NEXT: store i64 [[TMP1]], i64* [[TMP71]], align 8 +// CHECK18-NEXT: [[TMP72:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 2 +// CHECK18-NEXT: [[TMP73:%.*]] = bitcast i8** [[TMP72]] to i64* +// CHECK18-NEXT: store i64 [[TMP1]], i64* [[TMP73]], align 8 +// CHECK18-NEXT: [[TMP74:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 2 +// CHECK18-NEXT: store i8* null, i8** [[TMP74]], align 8 +// CHECK18-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 3 +// CHECK18-NEXT: [[TMP76:%.*]] = bitcast i8** [[TMP75]] to i32** +// CHECK18-NEXT: store i32* [[VLA]], i32** [[TMP76]], align 8 +// CHECK18-NEXT: [[TMP77:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 3 +// CHECK18-NEXT: [[TMP78:%.*]] = bitcast i8** [[TMP77]] to i32** +// CHECK18-NEXT: store i32* [[VLA]], i32** [[TMP78]], align 8 +// CHECK18-NEXT: store i64 [[TMP59]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 3), align 8 +// CHECK18-NEXT: [[TMP79:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 3 // CHECK18-NEXT: store i8* null, i8** [[TMP79]], align 8 -// CHECK18-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 2 -// CHECK18-NEXT: [[TMP81:%.*]] = bitcast i8** [[TMP80]] to i64* -// CHECK18-NEXT: store i64 [[TMP1]], i64* [[TMP81]], align 8 -// CHECK18-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 2 -// CHECK18-NEXT: [[TMP83:%.*]] = bitcast i8** [[TMP82]] to i64* -// CHECK18-NEXT: store i64 [[TMP1]], i64* [[TMP83]], align 8 -// CHECK18-NEXT: [[TMP84:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES24]], i32 0, i32 2 -// CHECK18-NEXT: store i64 8, i64* [[TMP84]], align 8 -// CHECK18-NEXT: [[TMP85:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 2 -// CHECK18-NEXT: store i8* null, i8** [[TMP85]], align 8 -// CHECK18-NEXT: [[TMP86:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 3 -// CHECK18-NEXT: [[TMP87:%.*]] = bitcast i8** [[TMP86]] to i32** -// CHECK18-NEXT: store i32* [[VLA]], i32** [[TMP87]], align 8 -// CHECK18-NEXT: [[TMP88:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 3 -// CHECK18-NEXT: [[TMP89:%.*]] = bitcast i8** [[TMP88]] to i32** -// CHECK18-NEXT: store i32* [[VLA]], i32** [[TMP89]], align 8 -// CHECK18-NEXT: [[TMP90:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES24]], i32 0, i32 3 -// CHECK18-NEXT: store i64 [[TMP67]], i64* [[TMP90]], 
align 8 -// CHECK18-NEXT: [[TMP91:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 3 -// CHECK18-NEXT: store i8* null, i8** [[TMP91]], align 8 -// CHECK18-NEXT: [[TMP92:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP93:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP94:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES24]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP95:%.*]] = load i32, i32* [[N]], align 4 -// CHECK18-NEXT: store i32 [[TMP95]], i32* [[DOTCAPTURE_EXPR_26]], align 4 -// CHECK18-NEXT: [[TMP96:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_26]], align 4 -// CHECK18-NEXT: [[SUB28:%.*]] = sub nsw i32 [[TMP96]], 0 -// CHECK18-NEXT: [[DIV29:%.*]] = sdiv i32 [[SUB28]], 1 -// CHECK18-NEXT: [[SUB30:%.*]] = sub nsw i32 [[DIV29]], 1 -// CHECK18-NEXT: store i32 [[SUB30]], i32* [[DOTCAPTURE_EXPR_27]], align 4 -// CHECK18-NEXT: [[TMP97:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_27]], align 4 -// CHECK18-NEXT: [[ADD31:%.*]] = add nsw i32 [[TMP97]], 1 -// CHECK18-NEXT: [[TMP98:%.*]] = zext i32 [[ADD31]] to i64 -// CHECK18-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP98]]) -// CHECK18-NEXT: [[TMP99:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l158.region_id, i32 4, i8** [[TMP92]], i8** [[TMP93]], i64* [[TMP94]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.7, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK18-NEXT: [[TMP100:%.*]] = icmp ne i32 [[TMP99]], 0 -// CHECK18-NEXT: br i1 [[TMP100]], label [[OMP_OFFLOAD_FAILED32:%.*]], label [[OMP_OFFLOAD_CONT33:%.*]] -// CHECK18: omp_offload.failed32: -// CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l158(i64 [[TMP64]], i64 [[TMP66]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] -// CHECK18-NEXT: br label [[OMP_OFFLOAD_CONT33]] -// CHECK18: omp_offload.cont33: -// CHECK18-NEXT: [[TMP101:%.*]] = load i32, i32* [[N]], align 4 -// CHECK18-NEXT: [[CONV35:%.*]] = bitcast i64* [[N_CASTED34]] to i32* -// CHECK18-NEXT: store i32 [[TMP101]], i32* [[CONV35]], align 4 -// CHECK18-NEXT: [[TMP102:%.*]] = load i64, i64* [[N_CASTED34]], align 8 -// CHECK18-NEXT: [[TMP103:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK18-NEXT: [[TMP104:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS36]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP105:%.*]] = bitcast i8** [[TMP104]] to i64* -// CHECK18-NEXT: store i64 [[TMP102]], i64* [[TMP105]], align 8 -// CHECK18-NEXT: [[TMP106:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS37]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP107:%.*]] = bitcast i8** [[TMP106]] to i64* -// CHECK18-NEXT: store i64 [[TMP102]], i64* [[TMP107]], align 8 -// CHECK18-NEXT: [[TMP108:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES39]], i32 0, i32 0 -// CHECK18-NEXT: store i64 4, i64* [[TMP108]], align 8 -// CHECK18-NEXT: [[TMP109:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS38]], i64 0, i64 0 -// CHECK18-NEXT: store i8* null, i8** [[TMP109]], align 8 -// CHECK18-NEXT: [[TMP110:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS36]], i32 0, i32 1 -// CHECK18-NEXT: [[TMP111:%.*]] = bitcast i8** [[TMP110]] to i64* -// CHECK18-NEXT: store i64 [[TMP1]], i64* [[TMP111]], align 8 -// 
CHECK18-NEXT: [[TMP112:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS37]], i32 0, i32 1 -// CHECK18-NEXT: [[TMP113:%.*]] = bitcast i8** [[TMP112]] to i64* -// CHECK18-NEXT: store i64 [[TMP1]], i64* [[TMP113]], align 8 -// CHECK18-NEXT: [[TMP114:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES39]], i32 0, i32 1 -// CHECK18-NEXT: store i64 8, i64* [[TMP114]], align 8 -// CHECK18-NEXT: [[TMP115:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS38]], i64 0, i64 1 -// CHECK18-NEXT: store i8* null, i8** [[TMP115]], align 8 -// CHECK18-NEXT: [[TMP116:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS36]], i32 0, i32 2 -// CHECK18-NEXT: [[TMP117:%.*]] = bitcast i8** [[TMP116]] to i32** -// CHECK18-NEXT: store i32* [[VLA]], i32** [[TMP117]], align 8 -// CHECK18-NEXT: [[TMP118:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS37]], i32 0, i32 2 -// CHECK18-NEXT: [[TMP119:%.*]] = bitcast i8** [[TMP118]] to i32** -// CHECK18-NEXT: store i32* [[VLA]], i32** [[TMP119]], align 8 -// CHECK18-NEXT: [[TMP120:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES39]], i32 0, i32 2 -// CHECK18-NEXT: store i64 [[TMP103]], i64* [[TMP120]], align 8 -// CHECK18-NEXT: [[TMP121:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS38]], i64 0, i64 2 -// CHECK18-NEXT: store i8* null, i8** [[TMP121]], align 8 -// CHECK18-NEXT: [[TMP122:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS36]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP123:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS37]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP124:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES39]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP125:%.*]] = load i32, i32* [[N]], align 4 -// CHECK18-NEXT: store i32 [[TMP125]], i32* [[DOTCAPTURE_EXPR_41]], align 4 -// CHECK18-NEXT: [[TMP126:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_41]], align 4 -// CHECK18-NEXT: [[SUB43:%.*]] = sub nsw i32 [[TMP126]], 0 -// CHECK18-NEXT: [[DIV44:%.*]] = sdiv i32 [[SUB43]], 1 -// CHECK18-NEXT: [[SUB45:%.*]] = sub nsw i32 [[DIV44]], 1 -// CHECK18-NEXT: store i32 [[SUB45]], i32* [[DOTCAPTURE_EXPR_42]], align 4 -// CHECK18-NEXT: [[TMP127:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_42]], align 4 -// CHECK18-NEXT: [[ADD46:%.*]] = add nsw i32 [[TMP127]], 1 -// CHECK18-NEXT: [[TMP128:%.*]] = zext i32 [[ADD46]] to i64 -// CHECK18-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP128]]) -// CHECK18-NEXT: [[TMP129:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l163.region_id, i32 3, i8** [[TMP122]], i8** [[TMP123]], i64* [[TMP124]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK18-NEXT: [[TMP130:%.*]] = icmp ne i32 [[TMP129]], 0 -// CHECK18-NEXT: br i1 [[TMP130]], label [[OMP_OFFLOAD_FAILED47:%.*]], label [[OMP_OFFLOAD_CONT48:%.*]] -// CHECK18: omp_offload.failed47: -// CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l163(i64 [[TMP102]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] -// CHECK18-NEXT: br label [[OMP_OFFLOAD_CONT48]] -// CHECK18: omp_offload.cont48: -// CHECK18-NEXT: [[TMP131:%.*]] = load i32, i32* [[M]], align 4 -// CHECK18-NEXT: [[CONV50:%.*]] = bitcast i64* [[M_CASTED49]] to i32* -// CHECK18-NEXT: store i32 
[[TMP131]], i32* [[CONV50]], align 4 -// CHECK18-NEXT: [[TMP132:%.*]] = load i64, i64* [[M_CASTED49]], align 8 -// CHECK18-NEXT: [[TMP133:%.*]] = load i32, i32* [[N]], align 4 -// CHECK18-NEXT: [[CONV52:%.*]] = bitcast i64* [[N_CASTED51]] to i32* -// CHECK18-NEXT: store i32 [[TMP133]], i32* [[CONV52]], align 4 -// CHECK18-NEXT: [[TMP134:%.*]] = load i64, i64* [[N_CASTED51]], align 8 -// CHECK18-NEXT: [[TMP135:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK18-NEXT: [[TMP136:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS53]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP137:%.*]] = bitcast i8** [[TMP136]] to i64* -// CHECK18-NEXT: store i64 [[TMP132]], i64* [[TMP137]], align 8 -// CHECK18-NEXT: [[TMP138:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS54]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP139:%.*]] = bitcast i8** [[TMP138]] to i64* -// CHECK18-NEXT: store i64 [[TMP132]], i64* [[TMP139]], align 8 -// CHECK18-NEXT: [[TMP140:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES56]], i32 0, i32 0 -// CHECK18-NEXT: store i64 4, i64* [[TMP140]], align 8 -// CHECK18-NEXT: [[TMP141:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS55]], i64 0, i64 0 -// CHECK18-NEXT: store i8* null, i8** [[TMP141]], align 8 -// CHECK18-NEXT: [[TMP142:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS53]], i32 0, i32 1 -// CHECK18-NEXT: [[TMP143:%.*]] = bitcast i8** [[TMP142]] to i64* -// CHECK18-NEXT: store i64 [[TMP134]], i64* [[TMP143]], align 8 -// CHECK18-NEXT: [[TMP144:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS54]], i32 0, i32 1 -// CHECK18-NEXT: [[TMP145:%.*]] = bitcast i8** [[TMP144]] to i64* -// CHECK18-NEXT: store i64 [[TMP134]], i64* [[TMP145]], align 8 -// CHECK18-NEXT: [[TMP146:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES56]], i32 0, i32 1 -// CHECK18-NEXT: store i64 4, i64* [[TMP146]], align 8 -// CHECK18-NEXT: [[TMP147:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS55]], i64 0, i64 1 -// CHECK18-NEXT: store i8* null, i8** [[TMP147]], align 8 -// CHECK18-NEXT: [[TMP148:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS53]], i32 0, i32 2 -// CHECK18-NEXT: [[TMP149:%.*]] = bitcast i8** [[TMP148]] to i64* -// CHECK18-NEXT: store i64 [[TMP1]], i64* [[TMP149]], align 8 -// CHECK18-NEXT: [[TMP150:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS54]], i32 0, i32 2 -// CHECK18-NEXT: [[TMP151:%.*]] = bitcast i8** [[TMP150]] to i64* -// CHECK18-NEXT: store i64 [[TMP1]], i64* [[TMP151]], align 8 -// CHECK18-NEXT: [[TMP152:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES56]], i32 0, i32 2 -// CHECK18-NEXT: store i64 8, i64* [[TMP152]], align 8 -// CHECK18-NEXT: [[TMP153:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS55]], i64 0, i64 2 -// CHECK18-NEXT: store i8* null, i8** [[TMP153]], align 8 -// CHECK18-NEXT: [[TMP154:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS53]], i32 0, i32 3 -// CHECK18-NEXT: [[TMP155:%.*]] = bitcast i8** [[TMP154]] to i32** -// CHECK18-NEXT: store i32* [[VLA]], i32** [[TMP155]], align 8 -// CHECK18-NEXT: [[TMP156:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS54]], i32 0, i32 3 -// CHECK18-NEXT: [[TMP157:%.*]] = bitcast i8** [[TMP156]] to i32** -// CHECK18-NEXT: store i32* [[VLA]], i32** [[TMP157]], align 8 -// CHECK18-NEXT: [[TMP158:%.*]] = getelementptr inbounds [4 
x i64], [4 x i64]* [[DOTOFFLOAD_SIZES56]], i32 0, i32 3 -// CHECK18-NEXT: store i64 [[TMP135]], i64* [[TMP158]], align 8 -// CHECK18-NEXT: [[TMP159:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS55]], i64 0, i64 3 -// CHECK18-NEXT: store i8* null, i8** [[TMP159]], align 8 -// CHECK18-NEXT: [[TMP160:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS53]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP161:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS54]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP162:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES56]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP163:%.*]] = load i32, i32* [[N]], align 4 -// CHECK18-NEXT: store i32 [[TMP163]], i32* [[DOTCAPTURE_EXPR_58]], align 4 -// CHECK18-NEXT: [[TMP164:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_58]], align 4 -// CHECK18-NEXT: [[SUB60:%.*]] = sub nsw i32 [[TMP164]], 0 -// CHECK18-NEXT: [[DIV61:%.*]] = sdiv i32 [[SUB60]], 1 -// CHECK18-NEXT: [[SUB62:%.*]] = sub nsw i32 [[DIV61]], 1 -// CHECK18-NEXT: store i32 [[SUB62]], i32* [[DOTCAPTURE_EXPR_59]], align 4 -// CHECK18-NEXT: [[TMP165:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_59]], align 4 -// CHECK18-NEXT: [[ADD63:%.*]] = add nsw i32 [[TMP165]], 1 -// CHECK18-NEXT: [[TMP166:%.*]] = zext i32 [[ADD63]] to i64 -// CHECK18-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP166]]) -// CHECK18-NEXT: [[TMP167:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l168.region_id, i32 4, i8** [[TMP160]], i8** [[TMP161]], i64* [[TMP162]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK18-NEXT: [[TMP168:%.*]] = icmp ne i32 [[TMP167]], 0 -// CHECK18-NEXT: br i1 [[TMP168]], label [[OMP_OFFLOAD_FAILED64:%.*]], label [[OMP_OFFLOAD_CONT65:%.*]] -// CHECK18: omp_offload.failed64: -// CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l168(i64 [[TMP132]], i64 [[TMP134]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] -// CHECK18-NEXT: br label [[OMP_OFFLOAD_CONT65]] -// CHECK18: omp_offload.cont65: -// CHECK18-NEXT: [[TMP169:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 -// CHECK18-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP169]]) +// CHECK18-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP82:%.*]] = load i32, i32* [[N]], align 4 +// CHECK18-NEXT: store i32 [[TMP82]], i32* [[DOTCAPTURE_EXPR_24]], align 4 +// CHECK18-NEXT: [[TMP83:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_24]], align 4 +// CHECK18-NEXT: [[SUB26:%.*]] = sub nsw i32 [[TMP83]], 0 +// CHECK18-NEXT: [[DIV27:%.*]] = sdiv i32 [[SUB26]], 1 +// CHECK18-NEXT: [[SUB28:%.*]] = sub nsw i32 [[DIV27]], 1 +// CHECK18-NEXT: store i32 [[SUB28]], i32* [[DOTCAPTURE_EXPR_25]], align 4 +// CHECK18-NEXT: [[TMP84:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_25]], align 4 +// CHECK18-NEXT: [[ADD29:%.*]] = add nsw i32 [[TMP84]], 1 +// CHECK18-NEXT: [[TMP85:%.*]] = zext i32 [[ADD29]] to i64 +// CHECK18-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP85]]) +// CHECK18-NEXT: [[TMP86:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, 
i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l158.region_id, i32 4, i8** [[TMP80]], i8** [[TMP81]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK18-NEXT: [[TMP87:%.*]] = icmp ne i32 [[TMP86]], 0 +// CHECK18-NEXT: br i1 [[TMP87]], label [[OMP_OFFLOAD_FAILED30:%.*]], label [[OMP_OFFLOAD_CONT31:%.*]] +// CHECK18: omp_offload.failed30: +// CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l158(i64 [[TMP56]], i64 [[TMP58]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] +// CHECK18-NEXT: br label [[OMP_OFFLOAD_CONT31]] +// CHECK18: omp_offload.cont31: +// CHECK18-NEXT: [[TMP88:%.*]] = load i32, i32* [[N]], align 4 +// CHECK18-NEXT: [[CONV33:%.*]] = bitcast i64* [[N_CASTED32]] to i32* +// CHECK18-NEXT: store i32 [[TMP88]], i32* [[CONV33]], align 4 +// CHECK18-NEXT: [[TMP89:%.*]] = load i64, i64* [[N_CASTED32]], align 8 +// CHECK18-NEXT: [[TMP90:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK18-NEXT: [[TMP91:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP92:%.*]] = bitcast i8** [[TMP91]] to i64* +// CHECK18-NEXT: store i64 [[TMP89]], i64* [[TMP92]], align 8 +// CHECK18-NEXT: [[TMP93:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS35]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP94:%.*]] = bitcast i8** [[TMP93]] to i64* +// CHECK18-NEXT: store i64 [[TMP89]], i64* [[TMP94]], align 8 +// CHECK18-NEXT: [[TMP95:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS36]], i64 0, i64 0 +// CHECK18-NEXT: store i8* null, i8** [[TMP95]], align 8 +// CHECK18-NEXT: [[TMP96:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 1 +// CHECK18-NEXT: [[TMP97:%.*]] = bitcast i8** [[TMP96]] to i64* +// CHECK18-NEXT: store i64 [[TMP1]], i64* [[TMP97]], align 8 +// CHECK18-NEXT: [[TMP98:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS35]], i32 0, i32 1 +// CHECK18-NEXT: [[TMP99:%.*]] = bitcast i8** [[TMP98]] to i64* +// CHECK18-NEXT: store i64 [[TMP1]], i64* [[TMP99]], align 8 +// CHECK18-NEXT: [[TMP100:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS36]], i64 0, i64 1 +// CHECK18-NEXT: store i8* null, i8** [[TMP100]], align 8 +// CHECK18-NEXT: [[TMP101:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 2 +// CHECK18-NEXT: [[TMP102:%.*]] = bitcast i8** [[TMP101]] to i32** +// CHECK18-NEXT: store i32* [[VLA]], i32** [[TMP102]], align 8 +// CHECK18-NEXT: [[TMP103:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS35]], i32 0, i32 2 +// CHECK18-NEXT: [[TMP104:%.*]] = bitcast i8** [[TMP103]] to i32** +// CHECK18-NEXT: store i32* [[VLA]], i32** [[TMP104]], align 8 +// CHECK18-NEXT: store i64 [[TMP90]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.12, i32 0, i32 2), align 8 +// CHECK18-NEXT: [[TMP105:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS36]], i64 0, i64 2 +// CHECK18-NEXT: store i8* null, i8** [[TMP105]], align 8 +// CHECK18-NEXT: [[TMP106:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP107:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS35]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP108:%.*]] = load i32, i32* [[N]], align 4 +// CHECK18-NEXT: store i32 
[[TMP108]], i32* [[DOTCAPTURE_EXPR_38]], align 4 +// CHECK18-NEXT: [[TMP109:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_38]], align 4 +// CHECK18-NEXT: [[SUB40:%.*]] = sub nsw i32 [[TMP109]], 0 +// CHECK18-NEXT: [[DIV41:%.*]] = sdiv i32 [[SUB40]], 1 +// CHECK18-NEXT: [[SUB42:%.*]] = sub nsw i32 [[DIV41]], 1 +// CHECK18-NEXT: store i32 [[SUB42]], i32* [[DOTCAPTURE_EXPR_39]], align 4 +// CHECK18-NEXT: [[TMP110:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_39]], align 4 +// CHECK18-NEXT: [[ADD43:%.*]] = add nsw i32 [[TMP110]], 1 +// CHECK18-NEXT: [[TMP111:%.*]] = zext i32 [[ADD43]] to i64 +// CHECK18-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP111]]) +// CHECK18-NEXT: [[TMP112:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l163.region_id, i32 3, i8** [[TMP106]], i8** [[TMP107]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK18-NEXT: [[TMP113:%.*]] = icmp ne i32 [[TMP112]], 0 +// CHECK18-NEXT: br i1 [[TMP113]], label [[OMP_OFFLOAD_FAILED44:%.*]], label [[OMP_OFFLOAD_CONT45:%.*]] +// CHECK18: omp_offload.failed44: +// CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l163(i64 [[TMP89]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] +// CHECK18-NEXT: br label [[OMP_OFFLOAD_CONT45]] +// CHECK18: omp_offload.cont45: +// CHECK18-NEXT: [[TMP114:%.*]] = load i32, i32* [[M]], align 4 +// CHECK18-NEXT: [[CONV47:%.*]] = bitcast i64* [[M_CASTED46]] to i32* +// CHECK18-NEXT: store i32 [[TMP114]], i32* [[CONV47]], align 4 +// CHECK18-NEXT: [[TMP115:%.*]] = load i64, i64* [[M_CASTED46]], align 8 +// CHECK18-NEXT: [[TMP116:%.*]] = load i32, i32* [[N]], align 4 +// CHECK18-NEXT: [[CONV49:%.*]] = bitcast i64* [[N_CASTED48]] to i32* +// CHECK18-NEXT: store i32 [[TMP116]], i32* [[CONV49]], align 4 +// CHECK18-NEXT: [[TMP117:%.*]] = load i64, i64* [[N_CASTED48]], align 8 +// CHECK18-NEXT: [[TMP118:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK18-NEXT: [[TMP119:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP120:%.*]] = bitcast i8** [[TMP119]] to i64* +// CHECK18-NEXT: store i64 [[TMP115]], i64* [[TMP120]], align 8 +// CHECK18-NEXT: [[TMP121:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP122:%.*]] = bitcast i8** [[TMP121]] to i64* +// CHECK18-NEXT: store i64 [[TMP115]], i64* [[TMP122]], align 8 +// CHECK18-NEXT: [[TMP123:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 0 +// CHECK18-NEXT: store i8* null, i8** [[TMP123]], align 8 +// CHECK18-NEXT: [[TMP124:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 1 +// CHECK18-NEXT: [[TMP125:%.*]] = bitcast i8** [[TMP124]] to i64* +// CHECK18-NEXT: store i64 [[TMP117]], i64* [[TMP125]], align 8 +// CHECK18-NEXT: [[TMP126:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 1 +// CHECK18-NEXT: [[TMP127:%.*]] = bitcast i8** [[TMP126]] to i64* +// CHECK18-NEXT: store i64 [[TMP117]], i64* [[TMP127]], align 8 +// CHECK18-NEXT: [[TMP128:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 1 +// CHECK18-NEXT: store i8* null, i8** [[TMP128]], align 8 +// CHECK18-NEXT: [[TMP129:%.*]] = 
getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 2 +// CHECK18-NEXT: [[TMP130:%.*]] = bitcast i8** [[TMP129]] to i64* +// CHECK18-NEXT: store i64 [[TMP1]], i64* [[TMP130]], align 8 +// CHECK18-NEXT: [[TMP131:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 2 +// CHECK18-NEXT: [[TMP132:%.*]] = bitcast i8** [[TMP131]] to i64* +// CHECK18-NEXT: store i64 [[TMP1]], i64* [[TMP132]], align 8 +// CHECK18-NEXT: [[TMP133:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 2 +// CHECK18-NEXT: store i8* null, i8** [[TMP133]], align 8 +// CHECK18-NEXT: [[TMP134:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 3 +// CHECK18-NEXT: [[TMP135:%.*]] = bitcast i8** [[TMP134]] to i32** +// CHECK18-NEXT: store i32* [[VLA]], i32** [[TMP135]], align 8 +// CHECK18-NEXT: [[TMP136:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 3 +// CHECK18-NEXT: [[TMP137:%.*]] = bitcast i8** [[TMP136]] to i32** +// CHECK18-NEXT: store i32* [[VLA]], i32** [[TMP137]], align 8 +// CHECK18-NEXT: store i64 [[TMP118]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.16, i32 0, i32 3), align 8 +// CHECK18-NEXT: [[TMP138:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 3 +// CHECK18-NEXT: store i8* null, i8** [[TMP138]], align 8 +// CHECK18-NEXT: [[TMP139:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP140:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP141:%.*]] = load i32, i32* [[N]], align 4 +// CHECK18-NEXT: store i32 [[TMP141]], i32* [[DOTCAPTURE_EXPR_54]], align 4 +// CHECK18-NEXT: [[TMP142:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_54]], align 4 +// CHECK18-NEXT: [[SUB56:%.*]] = sub nsw i32 [[TMP142]], 0 +// CHECK18-NEXT: [[DIV57:%.*]] = sdiv i32 [[SUB56]], 1 +// CHECK18-NEXT: [[SUB58:%.*]] = sub nsw i32 [[DIV57]], 1 +// CHECK18-NEXT: store i32 [[SUB58]], i32* [[DOTCAPTURE_EXPR_55]], align 4 +// CHECK18-NEXT: [[TMP143:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_55]], align 4 +// CHECK18-NEXT: [[ADD59:%.*]] = add nsw i32 [[TMP143]], 1 +// CHECK18-NEXT: [[TMP144:%.*]] = zext i32 [[ADD59]] to i64 +// CHECK18-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP144]]) +// CHECK18-NEXT: [[TMP145:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l168.region_id, i32 4, i8** [[TMP139]], i8** [[TMP140]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.16, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK18-NEXT: [[TMP146:%.*]] = icmp ne i32 [[TMP145]], 0 +// CHECK18-NEXT: br i1 [[TMP146]], label [[OMP_OFFLOAD_FAILED60:%.*]], label [[OMP_OFFLOAD_CONT61:%.*]] +// CHECK18: omp_offload.failed60: +// CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l168(i64 [[TMP115]], i64 [[TMP117]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] +// CHECK18-NEXT: br label [[OMP_OFFLOAD_CONT61]] +// CHECK18: omp_offload.cont61: +// CHECK18-NEXT: [[TMP147:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 +// CHECK18-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP147]]) // CHECK18-NEXT: store 
i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK18-NEXT: [[TMP170:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK18-NEXT: call void @llvm.stackrestore(i8* [[TMP170]]) -// CHECK18-NEXT: [[TMP171:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK18-NEXT: ret i32 [[TMP171]] +// CHECK18-NEXT: [[TMP148:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK18-NEXT: call void @llvm.stackrestore(i8* [[TMP148]]) +// CHECK18-NEXT: [[TMP149:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK18-NEXT: ret i32 [[TMP149]] // // // CHECK18-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l148 @@ -20420,11 +20186,11 @@ // CHECK18-NEXT: [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK18-NEXT: store i32 [[TMP3]], i32* [[CONV2]], align 4 // CHECK18-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i64, i32*, i64)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i32* [[CONV1]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP4]]) +// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i64, i32*, i64)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32* [[CONV1]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP4]]) // CHECK18-NEXT: ret void // // -// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..5 +// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..6 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK18-NEXT: entry: // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -20506,7 +20272,7 @@ // CHECK18-NEXT: [[CONV7:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK18-NEXT: store i32 [[TMP21]], i32* [[CONV7]], align 4 // CHECK18-NEXT: [[TMP22:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64, i32*, i64)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], i64 [[TMP1]], i32* [[TMP2]], i64 [[TMP22]]) +// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64, i32*, i64)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], i64 [[TMP1]], i32* [[TMP2]], i64 [[TMP22]]) // CHECK18-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK18: omp.inner.for.inc: // CHECK18-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -20548,7 +20314,7 @@ // CHECK18-NEXT: ret void // // -// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..6 +// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..7 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK18-NEXT: entry: // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -20668,11 +20434,11 @@ // CHECK18-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32* // CHECK18-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8 // CHECK18-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8 -// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i64, i32*)* @.omp_outlined..8 to void (i32*, i32*, ...)*), i32* [[CONV]], i64 [[TMP0]], i32* [[TMP1]]) +// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i64, i32*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), i32* [[CONV]], i64 [[TMP0]], i32* [[TMP1]]) // CHECK18-NEXT: ret void // // -// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..8 +// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..10 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] { // CHECK18-NEXT: entry: // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -20744,7 +20510,7 @@ // CHECK18-NEXT: [[TMP17:%.*]] = zext i32 [[TMP16]] to i64 // CHECK18-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 // CHECK18-NEXT: [[TMP19:%.*]] = zext i32 [[TMP18]] to i64 -// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64, i32*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), i64 [[TMP17]], i64 [[TMP19]], i32* [[TMP0]], i64 [[TMP1]], i32* [[TMP2]]) +// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64, i32*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP17]], i64 [[TMP19]], i32* [[TMP0]], i64 [[TMP1]], i32* [[TMP2]]) // CHECK18-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK18: omp.inner.for.inc: // CHECK18-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -20763,7 +20529,7 @@ // CHECK18-NEXT: ret void // // -// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..9 +// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..11 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] { // CHECK18-NEXT: entry: // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -20887,11 +20653,11 @@ // CHECK18-NEXT: [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK18-NEXT: store i32 [[TMP3]], i32* [[CONV2]], align 4 // CHECK18-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i64, i32*, i64)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32* [[CONV1]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP4]]) +// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i64, i32*, i64)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i32* [[CONV1]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP4]]) // CHECK18-NEXT: ret void // // -// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..14 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK18-NEXT: entry: // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -20971,7 +20737,7 @@ // CHECK18-NEXT: [[CONV7:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK18-NEXT: store i32 [[TMP20]], i32* [[CONV7]], align 4 // CHECK18-NEXT: [[TMP21:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64, i32*, i64)* @.omp_outlined..12 to void (i32*, i32*, ...)*), i64 [[TMP17]], i64 [[TMP19]], i32* [[TMP0]], i64 [[TMP1]], i32* [[TMP2]], i64 [[TMP21]]) +// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64, i32*, i64)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i64 [[TMP17]], i64 [[TMP19]], i32* [[TMP0]], i64 [[TMP1]], i32* [[TMP2]], i64 [[TMP21]]) // CHECK18-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK18: omp.inner.for.inc: // CHECK18-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -20990,7 +20756,7 @@ // CHECK18-NEXT: ret void // // -// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..12 +// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..15 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK18-NEXT: entry: // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -21136,7 +20902,7 @@ // CHECK18-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK18-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK18-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK18-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK18-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.20, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.21, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK18-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK18-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK18: omp_offload.failed: @@ -21154,7 +20920,7 @@ // CHECK18-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0 // CHECK18-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0 // CHECK18-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK18-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l121.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.19, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.20, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK18-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l121.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x 
i64]* @.offload_sizes.24, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.25, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK18-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 // CHECK18-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]] // CHECK18: omp_offload.failed5: @@ -21184,7 +20950,7 @@ // CHECK18-NEXT: [[TMP30:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0 // CHECK18-NEXT: [[TMP31:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0 // CHECK18-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK18-NEXT: [[TMP32:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l126.region_id, i32 2, i8** [[TMP30]], i8** [[TMP31]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.23, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.24, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK18-NEXT: [[TMP32:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l126.region_id, i32 2, i8** [[TMP30]], i8** [[TMP31]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.28, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.29, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK18-NEXT: [[TMP33:%.*]] = icmp ne i32 [[TMP32]], 0 // CHECK18-NEXT: br i1 [[TMP33]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]] // CHECK18: omp_offload.failed11: @@ -21202,7 +20968,7 @@ // CHECK18-NEXT: [[TMP39:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 0 // CHECK18-NEXT: [[TMP40:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 0 // CHECK18-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK18-NEXT: [[TMP41:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l131.region_id, i32 1, i8** [[TMP39]], i8** [[TMP40]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.27, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.28, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK18-NEXT: [[TMP41:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l131.region_id, i32 1, i8** [[TMP39]], i8** [[TMP40]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.32, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.33, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK18-NEXT: [[TMP42:%.*]] = icmp ne i32 [[TMP41]], 0 // CHECK18-NEXT: br i1 [[TMP42]], label [[OMP_OFFLOAD_FAILED17:%.*]], label [[OMP_OFFLOAD_CONT18:%.*]] // CHECK18: omp_offload.failed17: @@ -21232,7 +20998,7 @@ // CHECK18-NEXT: [[TMP55:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0 // CHECK18-NEXT: [[TMP56:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0 // CHECK18-NEXT: call void 
@__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK18-NEXT: [[TMP57:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l136.region_id, i32 2, i8** [[TMP55]], i8** [[TMP56]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.31, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.32, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK18-NEXT: [[TMP57:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l136.region_id, i32 2, i8** [[TMP55]], i8** [[TMP56]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.36, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.37, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK18-NEXT: [[TMP58:%.*]] = icmp ne i32 [[TMP57]], 0 // CHECK18-NEXT: br i1 [[TMP58]], label [[OMP_OFFLOAD_FAILED25:%.*]], label [[OMP_OFFLOAD_CONT26:%.*]] // CHECK18: omp_offload.failed25: @@ -21248,11 +21014,11 @@ // CHECK18-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK18-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 // CHECK18-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 -// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK18-NEXT: ret void // // -// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..14 +// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..18 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK18-NEXT: entry: // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -21300,7 +21066,7 @@ // CHECK18-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 // CHECK18-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 // CHECK18-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]) +// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..19 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]) // CHECK18-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK18: omp.inner.for.inc: // CHECK18-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -21315,7 +21081,7 @@ // CHECK18-NEXT: ret void // // -// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..15 +// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..19 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK18-NEXT: entry: // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -21398,11 +21164,11 @@ // CHECK18-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK18-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 // CHECK18-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 -// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..17 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..22 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK18-NEXT: ret void // // -// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..17 +// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..22 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK18-NEXT: entry: // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -21450,7 +21216,7 @@ // CHECK18-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 // CHECK18-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 // CHECK18-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]) +// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..23 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]) // CHECK18-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK18: omp.inner.for.inc: // CHECK18-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -21465,7 +21231,7 @@ // CHECK18-NEXT: ret void // // -// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..18 +// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..23 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK18-NEXT: entry: // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -21559,11 +21325,11 @@ // CHECK18-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK18-NEXT: store i32 [[TMP2]], i32* [[CONV1]], align 4 // CHECK18-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..21 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP3]]) +// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..26 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP3]]) // CHECK18-NEXT: ret void // // -// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..21 +// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..26 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK18-NEXT: entry: // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -21619,7 +21385,7 @@ // CHECK18-NEXT: [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK18-NEXT: store i32 [[TMP12]], i32* [[CONV2]], align 4 // CHECK18-NEXT: [[TMP13:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..22 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]]) +// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..27 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]]) // CHECK18-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK18: omp.inner.for.inc: // CHECK18-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -21634,7 +21400,7 @@ // CHECK18-NEXT: ret void // // -// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..22 +// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..27 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK18-NEXT: entry: // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -21742,11 +21508,11 @@ // CHECK18-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK18-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 // CHECK18-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 -// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..25 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..30 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK18-NEXT: ret void // // -// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..25 +// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..30 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK18-NEXT: entry: // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -21794,7 +21560,7 @@ // CHECK18-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 // CHECK18-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 // CHECK18-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..26 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]) +// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..31 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]) // CHECK18-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK18: omp.inner.for.inc: // CHECK18-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -21809,7 +21575,7 @@ // CHECK18-NEXT: ret void // // -// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..26 +// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..31 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK18-NEXT: entry: // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -21901,11 +21667,11 @@ // CHECK18-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK18-NEXT: store i32 [[TMP2]], i32* [[CONV1]], align 4 // CHECK18-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..29 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP3]]) +// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..34 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP3]]) // CHECK18-NEXT: ret void // // -// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..29 +// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..34 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK18-NEXT: entry: // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -21961,7 +21727,7 @@ // CHECK18-NEXT: [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK18-NEXT: store i32 [[TMP12]], i32* [[CONV2]], align 4 // CHECK18-NEXT: [[TMP13:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..30 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]]) +// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..35 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]]) // CHECK18-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK18: omp.inner.for.inc: // CHECK18-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -21976,7 +21742,7 @@ // CHECK18-NEXT: ret void // // -// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..30 +// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..35 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK18-NEXT: entry: // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -22076,7 +21842,6 @@ // CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK19-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK19-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4 -// CHECK19-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 4 // CHECK19-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK19-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK19-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -22084,36 +21849,32 @@ // CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS4:%.*]] = alloca [3 x i8*], align 4 // CHECK19-NEXT: [[DOTOFFLOAD_PTRS5:%.*]] = alloca [3 x i8*], align 4 // CHECK19-NEXT: [[DOTOFFLOAD_MAPPERS6:%.*]] = alloca [3 x i8*], align 4 -// CHECK19-NEXT: [[DOTOFFLOAD_SIZES7:%.*]] = alloca [3 x i64], align 4 -// CHECK19-NEXT: [[_TMP8:%.*]] = alloca i32, align 4 +// CHECK19-NEXT: [[_TMP7:%.*]] = alloca i32, align 4 +// CHECK19-NEXT: [[DOTCAPTURE_EXPR_8:%.*]] = alloca i32, align 4 // CHECK19-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4 -// CHECK19-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4 // CHECK19-NEXT: [[M_CASTED:%.*]] = alloca i32, align 4 -// CHECK19-NEXT: [[N_CASTED17:%.*]] = alloca i32, align 4 -// CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS18:%.*]] = alloca [4 x i8*], align 4 -// CHECK19-NEXT: [[DOTOFFLOAD_PTRS19:%.*]] = alloca [4 x i8*], align 4 -// CHECK19-NEXT: [[DOTOFFLOAD_MAPPERS20:%.*]] = alloca [4 x i8*], align 4 -// CHECK19-NEXT: [[DOTOFFLOAD_SIZES21:%.*]] = alloca [4 x i64], align 4 -// CHECK19-NEXT: [[_TMP22:%.*]] = alloca i32, align 4 -// CHECK19-NEXT: [[DOTCAPTURE_EXPR_23:%.*]] = alloca i32, align 4 -// CHECK19-NEXT: [[DOTCAPTURE_EXPR_24:%.*]] = alloca i32, align 4 -// CHECK19-NEXT: [[N_CASTED31:%.*]] = alloca i32, align 4 -// CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS32:%.*]] = alloca [3 x i8*], align 4 -// CHECK19-NEXT: [[DOTOFFLOAD_PTRS33:%.*]] = alloca [3 x i8*], align 4 -// CHECK19-NEXT: [[DOTOFFLOAD_MAPPERS34:%.*]] = alloca [3 x i8*], align 4 -// CHECK19-NEXT: [[DOTOFFLOAD_SIZES35:%.*]] = alloca [3 x i64], align 4 -// CHECK19-NEXT: [[_TMP36:%.*]] = alloca i32, align 4 -// CHECK19-NEXT: [[DOTCAPTURE_EXPR_37:%.*]] = alloca i32, align 4 -// CHECK19-NEXT: [[DOTCAPTURE_EXPR_38:%.*]] = alloca i32, align 4 -// CHECK19-NEXT: [[M_CASTED45:%.*]] = alloca i32, align 4 -// CHECK19-NEXT: [[N_CASTED46:%.*]] = alloca i32, align 4 -// CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS47:%.*]] = alloca [4 x i8*], align 4 -// CHECK19-NEXT: [[DOTOFFLOAD_PTRS48:%.*]] = alloca [4 x i8*], align 4 -// CHECK19-NEXT: [[DOTOFFLOAD_MAPPERS49:%.*]] = alloca [4 x i8*], align 4 
-// CHECK19-NEXT: [[DOTOFFLOAD_SIZES50:%.*]] = alloca [4 x i64], align 4 -// CHECK19-NEXT: [[_TMP51:%.*]] = alloca i32, align 4 -// CHECK19-NEXT: [[DOTCAPTURE_EXPR_52:%.*]] = alloca i32, align 4 -// CHECK19-NEXT: [[DOTCAPTURE_EXPR_53:%.*]] = alloca i32, align 4 +// CHECK19-NEXT: [[N_CASTED16:%.*]] = alloca i32, align 4 +// CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS17:%.*]] = alloca [4 x i8*], align 4 +// CHECK19-NEXT: [[DOTOFFLOAD_PTRS18:%.*]] = alloca [4 x i8*], align 4 +// CHECK19-NEXT: [[DOTOFFLOAD_MAPPERS19:%.*]] = alloca [4 x i8*], align 4 +// CHECK19-NEXT: [[_TMP20:%.*]] = alloca i32, align 4 +// CHECK19-NEXT: [[DOTCAPTURE_EXPR_21:%.*]] = alloca i32, align 4 +// CHECK19-NEXT: [[DOTCAPTURE_EXPR_22:%.*]] = alloca i32, align 4 +// CHECK19-NEXT: [[N_CASTED29:%.*]] = alloca i32, align 4 +// CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS30:%.*]] = alloca [3 x i8*], align 4 +// CHECK19-NEXT: [[DOTOFFLOAD_PTRS31:%.*]] = alloca [3 x i8*], align 4 +// CHECK19-NEXT: [[DOTOFFLOAD_MAPPERS32:%.*]] = alloca [3 x i8*], align 4 +// CHECK19-NEXT: [[_TMP33:%.*]] = alloca i32, align 4 +// CHECK19-NEXT: [[DOTCAPTURE_EXPR_34:%.*]] = alloca i32, align 4 +// CHECK19-NEXT: [[DOTCAPTURE_EXPR_35:%.*]] = alloca i32, align 4 +// CHECK19-NEXT: [[M_CASTED42:%.*]] = alloca i32, align 4 +// CHECK19-NEXT: [[N_CASTED43:%.*]] = alloca i32, align 4 +// CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS44:%.*]] = alloca [4 x i8*], align 4 +// CHECK19-NEXT: [[DOTOFFLOAD_PTRS45:%.*]] = alloca [4 x i8*], align 4 +// CHECK19-NEXT: [[DOTOFFLOAD_MAPPERS46:%.*]] = alloca [4 x i8*], align 4 +// CHECK19-NEXT: [[_TMP47:%.*]] = alloca i32, align 4 +// CHECK19-NEXT: [[DOTCAPTURE_EXPR_48:%.*]] = alloca i32, align 4 +// CHECK19-NEXT: [[DOTCAPTURE_EXPR_49:%.*]] = alloca i32, align 4 // CHECK19-NEXT: store i32 0, i32* [[RETVAL]], align 4 // CHECK19-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 // CHECK19-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 4 @@ -22135,308 +21896,274 @@ // CHECK19-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK19-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i32* // CHECK19-NEXT: store i32 [[TMP3]], i32* [[TMP9]], align 4 -// CHECK19-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK19-NEXT: store i64 4, i64* [[TMP10]], align 4 -// CHECK19-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK19-NEXT: store i8* null, i8** [[TMP11]], align 4 -// CHECK19-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK19-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32* -// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP13]], align 4 -// CHECK19-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK19-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32* -// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP15]], align 4 -// CHECK19-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK19-NEXT: store i64 4, i64* [[TMP16]], align 4 -// CHECK19-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK19-NEXT: store i8* null, i8** [[TMP17]], align 4 -// CHECK19-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK19-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* 
[[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK19-NEXT: store i8* null, i8** [[TMP10]], align 4 +// CHECK19-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK19-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i32* +// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP12]], align 4 +// CHECK19-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK19-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i32* +// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP14]], align 4 +// CHECK19-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK19-NEXT: store i8* null, i8** [[TMP15]], align 4 +// CHECK19-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK19-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** +// CHECK19-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 4 +// CHECK19-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK19-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** // CHECK19-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 4 -// CHECK19-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK19-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** -// CHECK19-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 4 -// CHECK19-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK19-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 4 -// CHECK19-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK19-NEXT: store i8* null, i8** [[TMP23]], align 4 -// CHECK19-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4 -// CHECK19-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK19-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK19-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 +// CHECK19-NEXT: store i64 [[TMP5]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 2), align 4 +// CHECK19-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK19-NEXT: store i8* null, i8** [[TMP20]], align 4 +// CHECK19-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP23:%.*]] = load i32, i32* [[N]], align 4 +// CHECK19-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK19-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK19-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK19-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK19-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK19-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK19-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK19-NEXT: [[ADD:%.*]] = add nsw 
i32 [[TMP29]], 1 -// CHECK19-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 -// CHECK19-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP30]]) -// CHECK19-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l148.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK19-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 -// CHECK19-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK19-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], 1 +// CHECK19-NEXT: [[TMP26:%.*]] = zext i32 [[ADD]] to i64 +// CHECK19-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP26]]) +// CHECK19-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l148.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK19-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK19-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK19: omp_offload.failed: // CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l148(i32 [[TMP3]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK19: omp_offload.cont: -// CHECK19-NEXT: [[TMP33:%.*]] = load i32, i32* [[N]], align 4 -// CHECK19-NEXT: store i32 [[TMP33]], i32* [[N_CASTED3]], align 4 -// CHECK19-NEXT: [[TMP34:%.*]] = load i32, i32* [[N_CASTED3]], align 4 -// CHECK19-NEXT: [[TMP35:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK19-NEXT: [[TMP36:%.*]] = sext i32 [[TMP35]] to i64 -// CHECK19-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i32* -// CHECK19-NEXT: store i32 [[TMP34]], i32* [[TMP38]], align 4 -// CHECK19-NEXT: [[TMP39:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i32* -// CHECK19-NEXT: store i32 [[TMP34]], i32* [[TMP40]], align 4 -// CHECK19-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 0 -// CHECK19-NEXT: store i64 4, i64* [[TMP41]], align 4 -// CHECK19-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP29:%.*]] = load i32, i32* [[N]], align 4 +// CHECK19-NEXT: store i32 [[TMP29]], i32* [[N_CASTED3]], align 4 +// CHECK19-NEXT: [[TMP30:%.*]] = load i32, i32* [[N_CASTED3]], align 4 +// CHECK19-NEXT: [[TMP31:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK19-NEXT: [[TMP32:%.*]] = sext i32 [[TMP31]] to i64 +// CHECK19-NEXT: [[TMP33:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i32* +// CHECK19-NEXT: store i32 [[TMP30]], i32* [[TMP34]], align 4 +// CHECK19-NEXT: [[TMP35:%.*]] = 
getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP36:%.*]] = bitcast i8** [[TMP35]] to i32* +// CHECK19-NEXT: store i32 [[TMP30]], i32* [[TMP36]], align 4 +// CHECK19-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0 +// CHECK19-NEXT: store i8* null, i8** [[TMP37]], align 4 +// CHECK19-NEXT: [[TMP38:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1 +// CHECK19-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i32* +// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP39]], align 4 +// CHECK19-NEXT: [[TMP40:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 1 +// CHECK19-NEXT: [[TMP41:%.*]] = bitcast i8** [[TMP40]] to i32* +// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP41]], align 4 +// CHECK19-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1 // CHECK19-NEXT: store i8* null, i8** [[TMP42]], align 4 -// CHECK19-NEXT: [[TMP43:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1 -// CHECK19-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32* -// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP44]], align 4 -// CHECK19-NEXT: [[TMP45:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 1 -// CHECK19-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32* -// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP46]], align 4 -// CHECK19-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 1 -// CHECK19-NEXT: store i64 4, i64* [[TMP47]], align 4 -// CHECK19-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1 -// CHECK19-NEXT: store i8* null, i8** [[TMP48]], align 4 -// CHECK19-NEXT: [[TMP49:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2 -// CHECK19-NEXT: [[TMP50:%.*]] = bitcast i8** [[TMP49]] to i32** -// CHECK19-NEXT: store i32* [[VLA]], i32** [[TMP50]], align 4 -// CHECK19-NEXT: [[TMP51:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 2 -// CHECK19-NEXT: [[TMP52:%.*]] = bitcast i8** [[TMP51]] to i32** -// CHECK19-NEXT: store i32* [[VLA]], i32** [[TMP52]], align 4 -// CHECK19-NEXT: [[TMP53:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 2 -// CHECK19-NEXT: store i64 [[TMP36]], i64* [[TMP53]], align 4 -// CHECK19-NEXT: [[TMP54:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 2 -// CHECK19-NEXT: store i8* null, i8** [[TMP54]], align 4 -// CHECK19-NEXT: [[TMP55:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP56:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP57:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP43:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2 +// CHECK19-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32** +// CHECK19-NEXT: store i32* [[VLA]], i32** [[TMP44]], align 4 +// CHECK19-NEXT: [[TMP45:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 2 +// CHECK19-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32** +// CHECK19-NEXT: store i32* [[VLA]], i32** [[TMP46]], align 4 
+// CHECK19-NEXT: store i64 [[TMP32]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 2), align 4 +// CHECK19-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 2 +// CHECK19-NEXT: store i8* null, i8** [[TMP47]], align 4 +// CHECK19-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP49:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP50:%.*]] = load i32, i32* [[N]], align 4 +// CHECK19-NEXT: store i32 [[TMP50]], i32* [[DOTCAPTURE_EXPR_8]], align 4 +// CHECK19-NEXT: [[TMP51:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_8]], align 4 +// CHECK19-NEXT: [[SUB10:%.*]] = sub nsw i32 [[TMP51]], 0 +// CHECK19-NEXT: [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1 +// CHECK19-NEXT: [[SUB12:%.*]] = sub nsw i32 [[DIV11]], 1 +// CHECK19-NEXT: store i32 [[SUB12]], i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK19-NEXT: [[TMP52:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK19-NEXT: [[ADD13:%.*]] = add nsw i32 [[TMP52]], 1 +// CHECK19-NEXT: [[TMP53:%.*]] = zext i32 [[ADD13]] to i64 +// CHECK19-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP53]]) +// CHECK19-NEXT: [[TMP54:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l153.region_id, i32 3, i8** [[TMP48]], i8** [[TMP49]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK19-NEXT: [[TMP55:%.*]] = icmp ne i32 [[TMP54]], 0 +// CHECK19-NEXT: br i1 [[TMP55]], label [[OMP_OFFLOAD_FAILED14:%.*]], label [[OMP_OFFLOAD_CONT15:%.*]] +// CHECK19: omp_offload.failed14: +// CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l153(i32 [[TMP30]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] +// CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT15]] +// CHECK19: omp_offload.cont15: +// CHECK19-NEXT: [[TMP56:%.*]] = load i32, i32* [[M]], align 4 +// CHECK19-NEXT: store i32 [[TMP56]], i32* [[M_CASTED]], align 4 +// CHECK19-NEXT: [[TMP57:%.*]] = load i32, i32* [[M_CASTED]], align 4 // CHECK19-NEXT: [[TMP58:%.*]] = load i32, i32* [[N]], align 4 -// CHECK19-NEXT: store i32 [[TMP58]], i32* [[DOTCAPTURE_EXPR_9]], align 4 -// CHECK19-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 -// CHECK19-NEXT: [[SUB11:%.*]] = sub nsw i32 [[TMP59]], 0 -// CHECK19-NEXT: [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1 -// CHECK19-NEXT: [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1 -// CHECK19-NEXT: store i32 [[SUB13]], i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK19-NEXT: [[TMP60:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK19-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP60]], 1 -// CHECK19-NEXT: [[TMP61:%.*]] = zext i32 [[ADD14]] to i64 -// CHECK19-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP61]]) -// CHECK19-NEXT: [[TMP62:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l153.region_id, i32 3, i8** [[TMP55]], i8** [[TMP56]], i64* [[TMP57]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK19-NEXT: [[TMP63:%.*]] = icmp ne i32 
[[TMP62]], 0 -// CHECK19-NEXT: br i1 [[TMP63]], label [[OMP_OFFLOAD_FAILED15:%.*]], label [[OMP_OFFLOAD_CONT16:%.*]] -// CHECK19: omp_offload.failed15: -// CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l153(i32 [[TMP34]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] -// CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT16]] -// CHECK19: omp_offload.cont16: -// CHECK19-NEXT: [[TMP64:%.*]] = load i32, i32* [[M]], align 4 -// CHECK19-NEXT: store i32 [[TMP64]], i32* [[M_CASTED]], align 4 -// CHECK19-NEXT: [[TMP65:%.*]] = load i32, i32* [[M_CASTED]], align 4 -// CHECK19-NEXT: [[TMP66:%.*]] = load i32, i32* [[N]], align 4 -// CHECK19-NEXT: store i32 [[TMP66]], i32* [[N_CASTED17]], align 4 -// CHECK19-NEXT: [[TMP67:%.*]] = load i32, i32* [[N_CASTED17]], align 4 -// CHECK19-NEXT: [[TMP68:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK19-NEXT: [[TMP69:%.*]] = sext i32 [[TMP68]] to i64 -// CHECK19-NEXT: [[TMP70:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP71:%.*]] = bitcast i8** [[TMP70]] to i32* -// CHECK19-NEXT: store i32 [[TMP65]], i32* [[TMP71]], align 4 -// CHECK19-NEXT: [[TMP72:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 0 +// CHECK19-NEXT: store i32 [[TMP58]], i32* [[N_CASTED16]], align 4 +// CHECK19-NEXT: [[TMP59:%.*]] = load i32, i32* [[N_CASTED16]], align 4 +// CHECK19-NEXT: [[TMP60:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK19-NEXT: [[TMP61:%.*]] = sext i32 [[TMP60]] to i64 +// CHECK19-NEXT: [[TMP62:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP63:%.*]] = bitcast i8** [[TMP62]] to i32* +// CHECK19-NEXT: store i32 [[TMP57]], i32* [[TMP63]], align 4 +// CHECK19-NEXT: [[TMP64:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP65:%.*]] = bitcast i8** [[TMP64]] to i32* +// CHECK19-NEXT: store i32 [[TMP57]], i32* [[TMP65]], align 4 +// CHECK19-NEXT: [[TMP66:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 0 +// CHECK19-NEXT: store i8* null, i8** [[TMP66]], align 4 +// CHECK19-NEXT: [[TMP67:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 1 +// CHECK19-NEXT: [[TMP68:%.*]] = bitcast i8** [[TMP67]] to i32* +// CHECK19-NEXT: store i32 [[TMP59]], i32* [[TMP68]], align 4 +// CHECK19-NEXT: [[TMP69:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 1 +// CHECK19-NEXT: [[TMP70:%.*]] = bitcast i8** [[TMP69]] to i32* +// CHECK19-NEXT: store i32 [[TMP59]], i32* [[TMP70]], align 4 +// CHECK19-NEXT: [[TMP71:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 1 +// CHECK19-NEXT: store i8* null, i8** [[TMP71]], align 4 +// CHECK19-NEXT: [[TMP72:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 2 // CHECK19-NEXT: [[TMP73:%.*]] = bitcast i8** [[TMP72]] to i32* -// CHECK19-NEXT: store i32 [[TMP65]], i32* [[TMP73]], align 4 -// CHECK19-NEXT: [[TMP74:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES21]], i32 0, i32 0 -// CHECK19-NEXT: store i64 4, i64* [[TMP74]], align 4 -// CHECK19-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 0 -// CHECK19-NEXT: store i8* null, i8** [[TMP75]], align 4 -// CHECK19-NEXT: [[TMP76:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, 
i32 1 -// CHECK19-NEXT: [[TMP77:%.*]] = bitcast i8** [[TMP76]] to i32* -// CHECK19-NEXT: store i32 [[TMP67]], i32* [[TMP77]], align 4 -// CHECK19-NEXT: [[TMP78:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 1 -// CHECK19-NEXT: [[TMP79:%.*]] = bitcast i8** [[TMP78]] to i32* -// CHECK19-NEXT: store i32 [[TMP67]], i32* [[TMP79]], align 4 -// CHECK19-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES21]], i32 0, i32 1 -// CHECK19-NEXT: store i64 4, i64* [[TMP80]], align 4 -// CHECK19-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 1 +// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP73]], align 4 +// CHECK19-NEXT: [[TMP74:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 2 +// CHECK19-NEXT: [[TMP75:%.*]] = bitcast i8** [[TMP74]] to i32* +// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP75]], align 4 +// CHECK19-NEXT: [[TMP76:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 2 +// CHECK19-NEXT: store i8* null, i8** [[TMP76]], align 4 +// CHECK19-NEXT: [[TMP77:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 3 +// CHECK19-NEXT: [[TMP78:%.*]] = bitcast i8** [[TMP77]] to i32** +// CHECK19-NEXT: store i32* [[VLA]], i32** [[TMP78]], align 4 +// CHECK19-NEXT: [[TMP79:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 3 +// CHECK19-NEXT: [[TMP80:%.*]] = bitcast i8** [[TMP79]] to i32** +// CHECK19-NEXT: store i32* [[VLA]], i32** [[TMP80]], align 4 +// CHECK19-NEXT: store i64 [[TMP61]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 3), align 4 +// CHECK19-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 3 // CHECK19-NEXT: store i8* null, i8** [[TMP81]], align 4 -// CHECK19-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 2 -// CHECK19-NEXT: [[TMP83:%.*]] = bitcast i8** [[TMP82]] to i32* -// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP83]], align 4 -// CHECK19-NEXT: [[TMP84:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 2 -// CHECK19-NEXT: [[TMP85:%.*]] = bitcast i8** [[TMP84]] to i32* -// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP85]], align 4 -// CHECK19-NEXT: [[TMP86:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES21]], i32 0, i32 2 -// CHECK19-NEXT: store i64 4, i64* [[TMP86]], align 4 -// CHECK19-NEXT: [[TMP87:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 2 -// CHECK19-NEXT: store i8* null, i8** [[TMP87]], align 4 -// CHECK19-NEXT: [[TMP88:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 3 -// CHECK19-NEXT: [[TMP89:%.*]] = bitcast i8** [[TMP88]] to i32** -// CHECK19-NEXT: store i32* [[VLA]], i32** [[TMP89]], align 4 -// CHECK19-NEXT: [[TMP90:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 3 -// CHECK19-NEXT: [[TMP91:%.*]] = bitcast i8** [[TMP90]] to i32** -// CHECK19-NEXT: store i32* [[VLA]], i32** [[TMP91]], align 4 -// CHECK19-NEXT: [[TMP92:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES21]], i32 0, i32 3 -// CHECK19-NEXT: store i64 [[TMP69]], i64* [[TMP92]], align 4 -// CHECK19-NEXT: [[TMP93:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* 
[[DOTOFFLOAD_MAPPERS20]], i32 0, i32 3 -// CHECK19-NEXT: store i8* null, i8** [[TMP93]], align 4 -// CHECK19-NEXT: [[TMP94:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP95:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP96:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES21]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP97:%.*]] = load i32, i32* [[N]], align 4 -// CHECK19-NEXT: store i32 [[TMP97]], i32* [[DOTCAPTURE_EXPR_23]], align 4 -// CHECK19-NEXT: [[TMP98:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_23]], align 4 -// CHECK19-NEXT: [[SUB25:%.*]] = sub nsw i32 [[TMP98]], 0 -// CHECK19-NEXT: [[DIV26:%.*]] = sdiv i32 [[SUB25]], 1 -// CHECK19-NEXT: [[SUB27:%.*]] = sub nsw i32 [[DIV26]], 1 -// CHECK19-NEXT: store i32 [[SUB27]], i32* [[DOTCAPTURE_EXPR_24]], align 4 -// CHECK19-NEXT: [[TMP99:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_24]], align 4 -// CHECK19-NEXT: [[ADD28:%.*]] = add nsw i32 [[TMP99]], 1 -// CHECK19-NEXT: [[TMP100:%.*]] = zext i32 [[ADD28]] to i64 -// CHECK19-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP100]]) -// CHECK19-NEXT: [[TMP101:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l158.region_id, i32 4, i8** [[TMP94]], i8** [[TMP95]], i64* [[TMP96]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.7, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK19-NEXT: [[TMP102:%.*]] = icmp ne i32 [[TMP101]], 0 -// CHECK19-NEXT: br i1 [[TMP102]], label [[OMP_OFFLOAD_FAILED29:%.*]], label [[OMP_OFFLOAD_CONT30:%.*]] -// CHECK19: omp_offload.failed29: -// CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l158(i32 [[TMP65]], i32 [[TMP67]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] -// CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT30]] -// CHECK19: omp_offload.cont30: -// CHECK19-NEXT: [[TMP103:%.*]] = load i32, i32* [[N]], align 4 -// CHECK19-NEXT: store i32 [[TMP103]], i32* [[N_CASTED31]], align 4 -// CHECK19-NEXT: [[TMP104:%.*]] = load i32, i32* [[N_CASTED31]], align 4 -// CHECK19-NEXT: [[TMP105:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK19-NEXT: [[TMP106:%.*]] = sext i32 [[TMP105]] to i64 -// CHECK19-NEXT: [[TMP107:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS32]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP108:%.*]] = bitcast i8** [[TMP107]] to i32* -// CHECK19-NEXT: store i32 [[TMP104]], i32* [[TMP108]], align 4 -// CHECK19-NEXT: [[TMP109:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS33]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP110:%.*]] = bitcast i8** [[TMP109]] to i32* -// CHECK19-NEXT: store i32 [[TMP104]], i32* [[TMP110]], align 4 -// CHECK19-NEXT: [[TMP111:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES35]], i32 0, i32 0 -// CHECK19-NEXT: store i64 4, i64* [[TMP111]], align 4 -// CHECK19-NEXT: [[TMP112:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS34]], i32 0, i32 0 -// CHECK19-NEXT: store i8* null, i8** [[TMP112]], align 4 -// CHECK19-NEXT: [[TMP113:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS32]], i32 0, i32 1 -// CHECK19-NEXT: [[TMP114:%.*]] = bitcast i8** [[TMP113]] to i32* -// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP114]], align 4 -// CHECK19-NEXT: [[TMP115:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* 
[[DOTOFFLOAD_PTRS33]], i32 0, i32 1 -// CHECK19-NEXT: [[TMP116:%.*]] = bitcast i8** [[TMP115]] to i32* -// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP116]], align 4 -// CHECK19-NEXT: [[TMP117:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES35]], i32 0, i32 1 -// CHECK19-NEXT: store i64 4, i64* [[TMP117]], align 4 -// CHECK19-NEXT: [[TMP118:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS34]], i32 0, i32 1 -// CHECK19-NEXT: store i8* null, i8** [[TMP118]], align 4 -// CHECK19-NEXT: [[TMP119:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS32]], i32 0, i32 2 -// CHECK19-NEXT: [[TMP120:%.*]] = bitcast i8** [[TMP119]] to i32** -// CHECK19-NEXT: store i32* [[VLA]], i32** [[TMP120]], align 4 -// CHECK19-NEXT: [[TMP121:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS33]], i32 0, i32 2 -// CHECK19-NEXT: [[TMP122:%.*]] = bitcast i8** [[TMP121]] to i32** -// CHECK19-NEXT: store i32* [[VLA]], i32** [[TMP122]], align 4 -// CHECK19-NEXT: [[TMP123:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES35]], i32 0, i32 2 -// CHECK19-NEXT: store i64 [[TMP106]], i64* [[TMP123]], align 4 -// CHECK19-NEXT: [[TMP124:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS34]], i32 0, i32 2 -// CHECK19-NEXT: store i8* null, i8** [[TMP124]], align 4 -// CHECK19-NEXT: [[TMP125:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS32]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP126:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS33]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP127:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES35]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP128:%.*]] = load i32, i32* [[N]], align 4 -// CHECK19-NEXT: store i32 [[TMP128]], i32* [[DOTCAPTURE_EXPR_37]], align 4 -// CHECK19-NEXT: [[TMP129:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_37]], align 4 -// CHECK19-NEXT: [[SUB39:%.*]] = sub nsw i32 [[TMP129]], 0 -// CHECK19-NEXT: [[DIV40:%.*]] = sdiv i32 [[SUB39]], 1 -// CHECK19-NEXT: [[SUB41:%.*]] = sub nsw i32 [[DIV40]], 1 -// CHECK19-NEXT: store i32 [[SUB41]], i32* [[DOTCAPTURE_EXPR_38]], align 4 -// CHECK19-NEXT: [[TMP130:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_38]], align 4 -// CHECK19-NEXT: [[ADD42:%.*]] = add nsw i32 [[TMP130]], 1 -// CHECK19-NEXT: [[TMP131:%.*]] = zext i32 [[ADD42]] to i64 -// CHECK19-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP131]]) -// CHECK19-NEXT: [[TMP132:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l163.region_id, i32 3, i8** [[TMP125]], i8** [[TMP126]], i64* [[TMP127]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK19-NEXT: [[TMP133:%.*]] = icmp ne i32 [[TMP132]], 0 -// CHECK19-NEXT: br i1 [[TMP133]], label [[OMP_OFFLOAD_FAILED43:%.*]], label [[OMP_OFFLOAD_CONT44:%.*]] -// CHECK19: omp_offload.failed43: -// CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l163(i32 [[TMP104]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] -// CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT44]] -// CHECK19: omp_offload.cont44: -// CHECK19-NEXT: [[TMP134:%.*]] = load i32, i32* [[M]], align 4 -// CHECK19-NEXT: store i32 [[TMP134]], i32* [[M_CASTED45]], align 4 -// CHECK19-NEXT: [[TMP135:%.*]] = load i32, i32* [[M_CASTED45]], align 4 -// CHECK19-NEXT: [[TMP136:%.*]] = load 
i32, i32* [[N]], align 4 -// CHECK19-NEXT: store i32 [[TMP136]], i32* [[N_CASTED46]], align 4 -// CHECK19-NEXT: [[TMP137:%.*]] = load i32, i32* [[N_CASTED46]], align 4 -// CHECK19-NEXT: [[TMP138:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK19-NEXT: [[TMP139:%.*]] = sext i32 [[TMP138]] to i64 -// CHECK19-NEXT: [[TMP140:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS47]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP141:%.*]] = bitcast i8** [[TMP140]] to i32* -// CHECK19-NEXT: store i32 [[TMP135]], i32* [[TMP141]], align 4 -// CHECK19-NEXT: [[TMP142:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS48]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP143:%.*]] = bitcast i8** [[TMP142]] to i32* -// CHECK19-NEXT: store i32 [[TMP135]], i32* [[TMP143]], align 4 -// CHECK19-NEXT: [[TMP144:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES50]], i32 0, i32 0 -// CHECK19-NEXT: store i64 4, i64* [[TMP144]], align 4 -// CHECK19-NEXT: [[TMP145:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS49]], i32 0, i32 0 -// CHECK19-NEXT: store i8* null, i8** [[TMP145]], align 4 -// CHECK19-NEXT: [[TMP146:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS47]], i32 0, i32 1 -// CHECK19-NEXT: [[TMP147:%.*]] = bitcast i8** [[TMP146]] to i32* -// CHECK19-NEXT: store i32 [[TMP137]], i32* [[TMP147]], align 4 -// CHECK19-NEXT: [[TMP148:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS48]], i32 0, i32 1 -// CHECK19-NEXT: [[TMP149:%.*]] = bitcast i8** [[TMP148]] to i32* -// CHECK19-NEXT: store i32 [[TMP137]], i32* [[TMP149]], align 4 -// CHECK19-NEXT: [[TMP150:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES50]], i32 0, i32 1 -// CHECK19-NEXT: store i64 4, i64* [[TMP150]], align 4 -// CHECK19-NEXT: [[TMP151:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS49]], i32 0, i32 1 -// CHECK19-NEXT: store i8* null, i8** [[TMP151]], align 4 -// CHECK19-NEXT: [[TMP152:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS47]], i32 0, i32 2 -// CHECK19-NEXT: [[TMP153:%.*]] = bitcast i8** [[TMP152]] to i32* -// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP153]], align 4 -// CHECK19-NEXT: [[TMP154:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS48]], i32 0, i32 2 -// CHECK19-NEXT: [[TMP155:%.*]] = bitcast i8** [[TMP154]] to i32* -// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP155]], align 4 -// CHECK19-NEXT: [[TMP156:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES50]], i32 0, i32 2 -// CHECK19-NEXT: store i64 4, i64* [[TMP156]], align 4 -// CHECK19-NEXT: [[TMP157:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS49]], i32 0, i32 2 -// CHECK19-NEXT: store i8* null, i8** [[TMP157]], align 4 -// CHECK19-NEXT: [[TMP158:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS47]], i32 0, i32 3 -// CHECK19-NEXT: [[TMP159:%.*]] = bitcast i8** [[TMP158]] to i32** -// CHECK19-NEXT: store i32* [[VLA]], i32** [[TMP159]], align 4 -// CHECK19-NEXT: [[TMP160:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS48]], i32 0, i32 3 -// CHECK19-NEXT: [[TMP161:%.*]] = bitcast i8** [[TMP160]] to i32** -// CHECK19-NEXT: store i32* [[VLA]], i32** [[TMP161]], align 4 -// CHECK19-NEXT: [[TMP162:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES50]], i32 0, i32 3 -// CHECK19-NEXT: store i64 [[TMP139]], i64* [[TMP162]], align 4 -// CHECK19-NEXT: [[TMP163:%.*]] = 
getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS49]], i32 0, i32 3 -// CHECK19-NEXT: store i8* null, i8** [[TMP163]], align 4 -// CHECK19-NEXT: [[TMP164:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS47]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP165:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS48]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP166:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES50]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP167:%.*]] = load i32, i32* [[N]], align 4 -// CHECK19-NEXT: store i32 [[TMP167]], i32* [[DOTCAPTURE_EXPR_52]], align 4 -// CHECK19-NEXT: [[TMP168:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_52]], align 4 -// CHECK19-NEXT: [[SUB54:%.*]] = sub nsw i32 [[TMP168]], 0 -// CHECK19-NEXT: [[DIV55:%.*]] = sdiv i32 [[SUB54]], 1 -// CHECK19-NEXT: [[SUB56:%.*]] = sub nsw i32 [[DIV55]], 1 -// CHECK19-NEXT: store i32 [[SUB56]], i32* [[DOTCAPTURE_EXPR_53]], align 4 -// CHECK19-NEXT: [[TMP169:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_53]], align 4 -// CHECK19-NEXT: [[ADD57:%.*]] = add nsw i32 [[TMP169]], 1 -// CHECK19-NEXT: [[TMP170:%.*]] = zext i32 [[ADD57]] to i64 -// CHECK19-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP170]]) -// CHECK19-NEXT: [[TMP171:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l168.region_id, i32 4, i8** [[TMP164]], i8** [[TMP165]], i64* [[TMP166]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK19-NEXT: [[TMP172:%.*]] = icmp ne i32 [[TMP171]], 0 -// CHECK19-NEXT: br i1 [[TMP172]], label [[OMP_OFFLOAD_FAILED58:%.*]], label [[OMP_OFFLOAD_CONT59:%.*]] -// CHECK19: omp_offload.failed58: -// CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l168(i32 [[TMP135]], i32 [[TMP137]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] -// CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT59]] -// CHECK19: omp_offload.cont59: -// CHECK19-NEXT: [[TMP173:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 -// CHECK19-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP173]]) +// CHECK19-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP83:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP84:%.*]] = load i32, i32* [[N]], align 4 +// CHECK19-NEXT: store i32 [[TMP84]], i32* [[DOTCAPTURE_EXPR_21]], align 4 +// CHECK19-NEXT: [[TMP85:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_21]], align 4 +// CHECK19-NEXT: [[SUB23:%.*]] = sub nsw i32 [[TMP85]], 0 +// CHECK19-NEXT: [[DIV24:%.*]] = sdiv i32 [[SUB23]], 1 +// CHECK19-NEXT: [[SUB25:%.*]] = sub nsw i32 [[DIV24]], 1 +// CHECK19-NEXT: store i32 [[SUB25]], i32* [[DOTCAPTURE_EXPR_22]], align 4 +// CHECK19-NEXT: [[TMP86:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_22]], align 4 +// CHECK19-NEXT: [[ADD26:%.*]] = add nsw i32 [[TMP86]], 1 +// CHECK19-NEXT: [[TMP87:%.*]] = zext i32 [[ADD26]] to i64 +// CHECK19-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP87]]) +// CHECK19-NEXT: [[TMP88:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l158.region_id, i32 4, i8** [[TMP82]], i8** [[TMP83]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* 
@.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK19-NEXT: [[TMP89:%.*]] = icmp ne i32 [[TMP88]], 0 +// CHECK19-NEXT: br i1 [[TMP89]], label [[OMP_OFFLOAD_FAILED27:%.*]], label [[OMP_OFFLOAD_CONT28:%.*]] +// CHECK19: omp_offload.failed27: +// CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l158(i32 [[TMP57]], i32 [[TMP59]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] +// CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT28]] +// CHECK19: omp_offload.cont28: +// CHECK19-NEXT: [[TMP90:%.*]] = load i32, i32* [[N]], align 4 +// CHECK19-NEXT: store i32 [[TMP90]], i32* [[N_CASTED29]], align 4 +// CHECK19-NEXT: [[TMP91:%.*]] = load i32, i32* [[N_CASTED29]], align 4 +// CHECK19-NEXT: [[TMP92:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK19-NEXT: [[TMP93:%.*]] = sext i32 [[TMP92]] to i64 +// CHECK19-NEXT: [[TMP94:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS30]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP95:%.*]] = bitcast i8** [[TMP94]] to i32* +// CHECK19-NEXT: store i32 [[TMP91]], i32* [[TMP95]], align 4 +// CHECK19-NEXT: [[TMP96:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS31]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP97:%.*]] = bitcast i8** [[TMP96]] to i32* +// CHECK19-NEXT: store i32 [[TMP91]], i32* [[TMP97]], align 4 +// CHECK19-NEXT: [[TMP98:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS32]], i32 0, i32 0 +// CHECK19-NEXT: store i8* null, i8** [[TMP98]], align 4 +// CHECK19-NEXT: [[TMP99:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS30]], i32 0, i32 1 +// CHECK19-NEXT: [[TMP100:%.*]] = bitcast i8** [[TMP99]] to i32* +// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP100]], align 4 +// CHECK19-NEXT: [[TMP101:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS31]], i32 0, i32 1 +// CHECK19-NEXT: [[TMP102:%.*]] = bitcast i8** [[TMP101]] to i32* +// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP102]], align 4 +// CHECK19-NEXT: [[TMP103:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS32]], i32 0, i32 1 +// CHECK19-NEXT: store i8* null, i8** [[TMP103]], align 4 +// CHECK19-NEXT: [[TMP104:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS30]], i32 0, i32 2 +// CHECK19-NEXT: [[TMP105:%.*]] = bitcast i8** [[TMP104]] to i32** +// CHECK19-NEXT: store i32* [[VLA]], i32** [[TMP105]], align 4 +// CHECK19-NEXT: [[TMP106:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS31]], i32 0, i32 2 +// CHECK19-NEXT: [[TMP107:%.*]] = bitcast i8** [[TMP106]] to i32** +// CHECK19-NEXT: store i32* [[VLA]], i32** [[TMP107]], align 4 +// CHECK19-NEXT: store i64 [[TMP93]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.12, i32 0, i32 2), align 4 +// CHECK19-NEXT: [[TMP108:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS32]], i32 0, i32 2 +// CHECK19-NEXT: store i8* null, i8** [[TMP108]], align 4 +// CHECK19-NEXT: [[TMP109:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS30]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP110:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS31]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP111:%.*]] = load i32, i32* [[N]], align 4 +// CHECK19-NEXT: store i32 [[TMP111]], i32* [[DOTCAPTURE_EXPR_34]], align 4 +// CHECK19-NEXT: [[TMP112:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_34]], align 4 +// CHECK19-NEXT: [[SUB36:%.*]] = 
sub nsw i32 [[TMP112]], 0 +// CHECK19-NEXT: [[DIV37:%.*]] = sdiv i32 [[SUB36]], 1 +// CHECK19-NEXT: [[SUB38:%.*]] = sub nsw i32 [[DIV37]], 1 +// CHECK19-NEXT: store i32 [[SUB38]], i32* [[DOTCAPTURE_EXPR_35]], align 4 +// CHECK19-NEXT: [[TMP113:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_35]], align 4 +// CHECK19-NEXT: [[ADD39:%.*]] = add nsw i32 [[TMP113]], 1 +// CHECK19-NEXT: [[TMP114:%.*]] = zext i32 [[ADD39]] to i64 +// CHECK19-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP114]]) +// CHECK19-NEXT: [[TMP115:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l163.region_id, i32 3, i8** [[TMP109]], i8** [[TMP110]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK19-NEXT: [[TMP116:%.*]] = icmp ne i32 [[TMP115]], 0 +// CHECK19-NEXT: br i1 [[TMP116]], label [[OMP_OFFLOAD_FAILED40:%.*]], label [[OMP_OFFLOAD_CONT41:%.*]] +// CHECK19: omp_offload.failed40: +// CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l163(i32 [[TMP91]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] +// CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT41]] +// CHECK19: omp_offload.cont41: +// CHECK19-NEXT: [[TMP117:%.*]] = load i32, i32* [[M]], align 4 +// CHECK19-NEXT: store i32 [[TMP117]], i32* [[M_CASTED42]], align 4 +// CHECK19-NEXT: [[TMP118:%.*]] = load i32, i32* [[M_CASTED42]], align 4 +// CHECK19-NEXT: [[TMP119:%.*]] = load i32, i32* [[N]], align 4 +// CHECK19-NEXT: store i32 [[TMP119]], i32* [[N_CASTED43]], align 4 +// CHECK19-NEXT: [[TMP120:%.*]] = load i32, i32* [[N_CASTED43]], align 4 +// CHECK19-NEXT: [[TMP121:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK19-NEXT: [[TMP122:%.*]] = sext i32 [[TMP121]] to i64 +// CHECK19-NEXT: [[TMP123:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP124:%.*]] = bitcast i8** [[TMP123]] to i32* +// CHECK19-NEXT: store i32 [[TMP118]], i32* [[TMP124]], align 4 +// CHECK19-NEXT: [[TMP125:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP126:%.*]] = bitcast i8** [[TMP125]] to i32* +// CHECK19-NEXT: store i32 [[TMP118]], i32* [[TMP126]], align 4 +// CHECK19-NEXT: [[TMP127:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS46]], i32 0, i32 0 +// CHECK19-NEXT: store i8* null, i8** [[TMP127]], align 4 +// CHECK19-NEXT: [[TMP128:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 1 +// CHECK19-NEXT: [[TMP129:%.*]] = bitcast i8** [[TMP128]] to i32* +// CHECK19-NEXT: store i32 [[TMP120]], i32* [[TMP129]], align 4 +// CHECK19-NEXT: [[TMP130:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 1 +// CHECK19-NEXT: [[TMP131:%.*]] = bitcast i8** [[TMP130]] to i32* +// CHECK19-NEXT: store i32 [[TMP120]], i32* [[TMP131]], align 4 +// CHECK19-NEXT: [[TMP132:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS46]], i32 0, i32 1 +// CHECK19-NEXT: store i8* null, i8** [[TMP132]], align 4 +// CHECK19-NEXT: [[TMP133:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 2 +// CHECK19-NEXT: [[TMP134:%.*]] = bitcast i8** [[TMP133]] to i32* +// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP134]], align 4 +// CHECK19-NEXT: 
[[TMP135:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 2 +// CHECK19-NEXT: [[TMP136:%.*]] = bitcast i8** [[TMP135]] to i32* +// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP136]], align 4 +// CHECK19-NEXT: [[TMP137:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS46]], i32 0, i32 2 +// CHECK19-NEXT: store i8* null, i8** [[TMP137]], align 4 +// CHECK19-NEXT: [[TMP138:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 3 +// CHECK19-NEXT: [[TMP139:%.*]] = bitcast i8** [[TMP138]] to i32** +// CHECK19-NEXT: store i32* [[VLA]], i32** [[TMP139]], align 4 +// CHECK19-NEXT: [[TMP140:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 3 +// CHECK19-NEXT: [[TMP141:%.*]] = bitcast i8** [[TMP140]] to i32** +// CHECK19-NEXT: store i32* [[VLA]], i32** [[TMP141]], align 4 +// CHECK19-NEXT: store i64 [[TMP122]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.16, i32 0, i32 3), align 4 +// CHECK19-NEXT: [[TMP142:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS46]], i32 0, i32 3 +// CHECK19-NEXT: store i8* null, i8** [[TMP142]], align 4 +// CHECK19-NEXT: [[TMP143:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP144:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP145:%.*]] = load i32, i32* [[N]], align 4 +// CHECK19-NEXT: store i32 [[TMP145]], i32* [[DOTCAPTURE_EXPR_48]], align 4 +// CHECK19-NEXT: [[TMP146:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_48]], align 4 +// CHECK19-NEXT: [[SUB50:%.*]] = sub nsw i32 [[TMP146]], 0 +// CHECK19-NEXT: [[DIV51:%.*]] = sdiv i32 [[SUB50]], 1 +// CHECK19-NEXT: [[SUB52:%.*]] = sub nsw i32 [[DIV51]], 1 +// CHECK19-NEXT: store i32 [[SUB52]], i32* [[DOTCAPTURE_EXPR_49]], align 4 +// CHECK19-NEXT: [[TMP147:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_49]], align 4 +// CHECK19-NEXT: [[ADD53:%.*]] = add nsw i32 [[TMP147]], 1 +// CHECK19-NEXT: [[TMP148:%.*]] = zext i32 [[ADD53]] to i64 +// CHECK19-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP148]]) +// CHECK19-NEXT: [[TMP149:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l168.region_id, i32 4, i8** [[TMP143]], i8** [[TMP144]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.16, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK19-NEXT: [[TMP150:%.*]] = icmp ne i32 [[TMP149]], 0 +// CHECK19-NEXT: br i1 [[TMP150]], label [[OMP_OFFLOAD_FAILED54:%.*]], label [[OMP_OFFLOAD_CONT55:%.*]] +// CHECK19: omp_offload.failed54: +// CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l168(i32 [[TMP118]], i32 [[TMP120]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] +// CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT55]] +// CHECK19: omp_offload.cont55: +// CHECK19-NEXT: [[TMP151:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 +// CHECK19-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP151]]) // CHECK19-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK19-NEXT: [[TMP174:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK19-NEXT: call void @llvm.stackrestore(i8* [[TMP174]]) -// CHECK19-NEXT: [[TMP175:%.*]] = load i32, i32* [[RETVAL]], align 4 -// 
CHECK19-NEXT: ret i32 [[TMP175]] +// CHECK19-NEXT: [[TMP152:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK19-NEXT: call void @llvm.stackrestore(i8* [[TMP152]]) +// CHECK19-NEXT: [[TMP153:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK19-NEXT: ret i32 [[TMP153]] // // // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l148 @@ -22871,11 +22598,11 @@ // CHECK19-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 // CHECK19-NEXT: store i32 [[TMP3]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK19-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32, i32*, i32)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP4]]) +// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32, i32*, i32)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP4]]) // CHECK19-NEXT: ret void // // -// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..5 +// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..6 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -22953,7 +22680,7 @@ // CHECK19-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK19-NEXT: store i32 [[TMP19]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK19-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32, i32*, i32)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], i32 [[TMP1]], i32* [[TMP2]], i32 [[TMP20]]) +// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32, i32*, i32)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], i32 [[TMP1]], i32* [[TMP2]], i32 [[TMP20]]) // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK19: omp.inner.for.inc: // CHECK19-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -22995,7 +22722,7 @@ // CHECK19-NEXT: ret void // // -// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..6 +// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..7 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -23110,11 +22837,11 @@ // CHECK19-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4 // CHECK19-NEXT: [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4 // CHECK19-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4 -// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32, i32*)* @.omp_outlined..8 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32 [[TMP0]], i32* [[TMP1]]) +// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32, i32*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32 [[TMP0]], i32* [[TMP1]]) // CHECK19-NEXT: ret void // // -// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..8 +// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..10 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -23184,7 +22911,7 @@ // CHECK19: omp.inner.for.body: // CHECK19-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 // CHECK19-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 -// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32, i32*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), i32 [[TMP16]], i32 [[TMP17]], i32* [[TMP0]], i32 [[TMP1]], i32* [[TMP2]]) +// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32, i32*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP16]], i32 [[TMP17]], i32* [[TMP0]], i32 [[TMP1]], i32* [[TMP2]]) // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK19: omp.inner.for.inc: // CHECK19-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -23203,7 +22930,7 @@ // CHECK19-NEXT: ret void // // -// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..9 +// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..11 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -23321,11 +23048,11 @@ // CHECK19-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 // CHECK19-NEXT: store i32 [[TMP3]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK19-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32, i32*, i32)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP4]]) +// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32, i32*, i32)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP4]]) // CHECK19-NEXT: ret void // // -// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..14 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -23401,7 +23128,7 @@ // CHECK19-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK19-NEXT: store i32 [[TMP18]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK19-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32, i32*, i32)* @.omp_outlined..12 to void (i32*, i32*, ...)*), i32 [[TMP16]], i32 [[TMP17]], i32* [[TMP0]], i32 [[TMP1]], i32* [[TMP2]], i32 [[TMP19]]) +// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32, i32*, i32)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i32 [[TMP16]], i32 [[TMP17]], i32* [[TMP0]], i32 [[TMP1]], i32* [[TMP2]], i32 [[TMP19]]) // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK19: omp.inner.for.inc: // CHECK19-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -23420,7 +23147,7 @@ // CHECK19-NEXT: ret void // // -// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..12 +// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..15 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -23562,7 +23289,7 @@ // CHECK19-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK19-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK19-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK19-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK19-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.20, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.21, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK19-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK19-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK19: omp_offload.failed: @@ -23580,7 +23307,7 @@ // CHECK19-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0 // CHECK19-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0 // CHECK19-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK19-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l121.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.19, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.20, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK19-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l121.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x 
i64]* @.offload_sizes.24, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.25, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK19-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 // CHECK19-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]] // CHECK19: omp_offload.failed5: @@ -23609,7 +23336,7 @@ // CHECK19-NEXT: [[TMP30:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0 // CHECK19-NEXT: [[TMP31:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0 // CHECK19-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK19-NEXT: [[TMP32:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l126.region_id, i32 2, i8** [[TMP30]], i8** [[TMP31]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.23, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.24, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK19-NEXT: [[TMP32:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l126.region_id, i32 2, i8** [[TMP30]], i8** [[TMP31]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.28, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.29, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK19-NEXT: [[TMP33:%.*]] = icmp ne i32 [[TMP32]], 0 // CHECK19-NEXT: br i1 [[TMP33]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]] // CHECK19: omp_offload.failed11: @@ -23627,7 +23354,7 @@ // CHECK19-NEXT: [[TMP39:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 0 // CHECK19-NEXT: [[TMP40:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 0 // CHECK19-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK19-NEXT: [[TMP41:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l131.region_id, i32 1, i8** [[TMP39]], i8** [[TMP40]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.27, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.28, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK19-NEXT: [[TMP41:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l131.region_id, i32 1, i8** [[TMP39]], i8** [[TMP40]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.32, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.33, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK19-NEXT: [[TMP42:%.*]] = icmp ne i32 [[TMP41]], 0 // CHECK19-NEXT: br i1 [[TMP42]], label [[OMP_OFFLOAD_FAILED17:%.*]], label [[OMP_OFFLOAD_CONT18:%.*]] // CHECK19: omp_offload.failed17: @@ -23656,7 +23383,7 @@ // CHECK19-NEXT: [[TMP55:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0 // CHECK19-NEXT: [[TMP56:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0 // CHECK19-NEXT: call void 
@__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK19-NEXT: [[TMP57:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l136.region_id, i32 2, i8** [[TMP55]], i8** [[TMP56]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.31, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.32, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK19-NEXT: [[TMP57:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l136.region_id, i32 2, i8** [[TMP55]], i8** [[TMP56]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.36, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.37, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK19-NEXT: [[TMP58:%.*]] = icmp ne i32 [[TMP57]], 0 // CHECK19-NEXT: br i1 [[TMP58]], label [[OMP_OFFLOAD_FAILED24:%.*]], label [[OMP_OFFLOAD_CONT25:%.*]] // CHECK19: omp_offload.failed24: @@ -23672,11 +23399,11 @@ // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK19-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 // CHECK19-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 -// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK19-NEXT: ret void // // -// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..14 +// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..18 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -23722,7 +23449,7 @@ // CHECK19: omp.inner.for.body: // CHECK19-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 // CHECK19-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 -// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]) +// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..19 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]) // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK19: omp.inner.for.inc: // CHECK19-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -23737,7 +23464,7 @@ // CHECK19-NEXT: ret void // // -// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..15 +// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..19 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -23817,11 +23544,11 @@ // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK19-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 // CHECK19-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 -// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..17 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..22 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK19-NEXT: ret void // // -// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..17 +// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..22 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -23867,7 +23594,7 @@ // CHECK19: omp.inner.for.body: // CHECK19-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 // CHECK19-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 -// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]) +// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..23 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]) // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK19: omp.inner.for.inc: // CHECK19-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -23882,7 +23609,7 @@ // CHECK19-NEXT: ret void // // -// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..18 +// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..23 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -23971,11 +23698,11 @@ // CHECK19-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 // CHECK19-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK19-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..21 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP3]]) +// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..26 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP3]]) // CHECK19-NEXT: ret void // // -// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..21 +// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..26 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -24027,7 +23754,7 @@ // CHECK19-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK19-NEXT: store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK19-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..22 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]]) +// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..27 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]]) // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK19: omp.inner.for.inc: // CHECK19-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -24042,7 +23769,7 @@ // CHECK19-NEXT: ret void // // -// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..22 +// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..27 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -24144,11 +23871,11 @@ // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK19-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 // CHECK19-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 -// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..25 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..30 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK19-NEXT: ret void // // -// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..25 +// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..30 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -24194,7 +23921,7 @@ // CHECK19: omp.inner.for.body: // CHECK19-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 // CHECK19-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 -// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..26 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]) +// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..31 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]) // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK19: omp.inner.for.inc: // CHECK19-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -24209,7 +23936,7 @@ // CHECK19-NEXT: ret void // // -// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..26 +// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..31 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -24296,11 +24023,11 @@ // CHECK19-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 // CHECK19-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK19-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..29 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP3]]) +// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..34 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP3]]) // CHECK19-NEXT: ret void // // -// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..29 +// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..34 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -24352,7 +24079,7 @@ // CHECK19-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK19-NEXT: store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK19-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..30 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]]) +// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..35 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]]) // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK19: omp.inner.for.inc: // CHECK19-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -24367,7 +24094,7 @@ // CHECK19-NEXT: ret void // // -// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..30 +// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..35 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -24463,7 +24190,6 @@ // CHECK20-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK20-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK20-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4 -// CHECK20-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 4 // CHECK20-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK20-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK20-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -24471,36 +24197,32 @@ // CHECK20-NEXT: [[DOTOFFLOAD_BASEPTRS4:%.*]] = alloca [3 x i8*], align 4 // CHECK20-NEXT: [[DOTOFFLOAD_PTRS5:%.*]] = alloca [3 x i8*], align 4 // CHECK20-NEXT: [[DOTOFFLOAD_MAPPERS6:%.*]] = alloca [3 x i8*], align 4 -// CHECK20-NEXT: [[DOTOFFLOAD_SIZES7:%.*]] = alloca [3 x i64], align 4 -// CHECK20-NEXT: [[_TMP8:%.*]] = alloca i32, align 4 +// CHECK20-NEXT: [[_TMP7:%.*]] = alloca i32, align 4 +// CHECK20-NEXT: [[DOTCAPTURE_EXPR_8:%.*]] = alloca i32, align 4 // CHECK20-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4 -// CHECK20-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4 // CHECK20-NEXT: [[M_CASTED:%.*]] = alloca i32, align 4 -// CHECK20-NEXT: [[N_CASTED17:%.*]] = alloca i32, align 4 -// CHECK20-NEXT: [[DOTOFFLOAD_BASEPTRS18:%.*]] = alloca [4 x i8*], align 4 -// CHECK20-NEXT: [[DOTOFFLOAD_PTRS19:%.*]] = alloca [4 x i8*], align 4 -// CHECK20-NEXT: [[DOTOFFLOAD_MAPPERS20:%.*]] = alloca [4 x i8*], align 4 -// CHECK20-NEXT: [[DOTOFFLOAD_SIZES21:%.*]] = alloca [4 x i64], align 4 -// CHECK20-NEXT: [[_TMP22:%.*]] = alloca i32, align 4 -// CHECK20-NEXT: [[DOTCAPTURE_EXPR_23:%.*]] = alloca i32, align 4 -// CHECK20-NEXT: [[DOTCAPTURE_EXPR_24:%.*]] = alloca i32, align 4 -// CHECK20-NEXT: [[N_CASTED31:%.*]] = alloca i32, align 4 -// CHECK20-NEXT: [[DOTOFFLOAD_BASEPTRS32:%.*]] = alloca [3 x i8*], align 4 -// CHECK20-NEXT: [[DOTOFFLOAD_PTRS33:%.*]] = alloca [3 x i8*], align 4 -// CHECK20-NEXT: [[DOTOFFLOAD_MAPPERS34:%.*]] = alloca [3 x i8*], align 4 -// CHECK20-NEXT: [[DOTOFFLOAD_SIZES35:%.*]] = alloca [3 x i64], align 4 -// CHECK20-NEXT: [[_TMP36:%.*]] = alloca i32, align 4 -// CHECK20-NEXT: [[DOTCAPTURE_EXPR_37:%.*]] = alloca i32, align 4 -// CHECK20-NEXT: [[DOTCAPTURE_EXPR_38:%.*]] = alloca i32, align 4 -// CHECK20-NEXT: [[M_CASTED45:%.*]] = alloca i32, align 4 -// CHECK20-NEXT: [[N_CASTED46:%.*]] = alloca i32, align 4 -// CHECK20-NEXT: [[DOTOFFLOAD_BASEPTRS47:%.*]] = alloca [4 x i8*], align 4 -// CHECK20-NEXT: [[DOTOFFLOAD_PTRS48:%.*]] = alloca [4 x i8*], align 4 -// CHECK20-NEXT: [[DOTOFFLOAD_MAPPERS49:%.*]] = alloca [4 x i8*], align 4 -// 
CHECK20-NEXT: [[DOTOFFLOAD_SIZES50:%.*]] = alloca [4 x i64], align 4 -// CHECK20-NEXT: [[_TMP51:%.*]] = alloca i32, align 4 -// CHECK20-NEXT: [[DOTCAPTURE_EXPR_52:%.*]] = alloca i32, align 4 -// CHECK20-NEXT: [[DOTCAPTURE_EXPR_53:%.*]] = alloca i32, align 4 +// CHECK20-NEXT: [[N_CASTED16:%.*]] = alloca i32, align 4 +// CHECK20-NEXT: [[DOTOFFLOAD_BASEPTRS17:%.*]] = alloca [4 x i8*], align 4 +// CHECK20-NEXT: [[DOTOFFLOAD_PTRS18:%.*]] = alloca [4 x i8*], align 4 +// CHECK20-NEXT: [[DOTOFFLOAD_MAPPERS19:%.*]] = alloca [4 x i8*], align 4 +// CHECK20-NEXT: [[_TMP20:%.*]] = alloca i32, align 4 +// CHECK20-NEXT: [[DOTCAPTURE_EXPR_21:%.*]] = alloca i32, align 4 +// CHECK20-NEXT: [[DOTCAPTURE_EXPR_22:%.*]] = alloca i32, align 4 +// CHECK20-NEXT: [[N_CASTED29:%.*]] = alloca i32, align 4 +// CHECK20-NEXT: [[DOTOFFLOAD_BASEPTRS30:%.*]] = alloca [3 x i8*], align 4 +// CHECK20-NEXT: [[DOTOFFLOAD_PTRS31:%.*]] = alloca [3 x i8*], align 4 +// CHECK20-NEXT: [[DOTOFFLOAD_MAPPERS32:%.*]] = alloca [3 x i8*], align 4 +// CHECK20-NEXT: [[_TMP33:%.*]] = alloca i32, align 4 +// CHECK20-NEXT: [[DOTCAPTURE_EXPR_34:%.*]] = alloca i32, align 4 +// CHECK20-NEXT: [[DOTCAPTURE_EXPR_35:%.*]] = alloca i32, align 4 +// CHECK20-NEXT: [[M_CASTED42:%.*]] = alloca i32, align 4 +// CHECK20-NEXT: [[N_CASTED43:%.*]] = alloca i32, align 4 +// CHECK20-NEXT: [[DOTOFFLOAD_BASEPTRS44:%.*]] = alloca [4 x i8*], align 4 +// CHECK20-NEXT: [[DOTOFFLOAD_PTRS45:%.*]] = alloca [4 x i8*], align 4 +// CHECK20-NEXT: [[DOTOFFLOAD_MAPPERS46:%.*]] = alloca [4 x i8*], align 4 +// CHECK20-NEXT: [[_TMP47:%.*]] = alloca i32, align 4 +// CHECK20-NEXT: [[DOTCAPTURE_EXPR_48:%.*]] = alloca i32, align 4 +// CHECK20-NEXT: [[DOTCAPTURE_EXPR_49:%.*]] = alloca i32, align 4 // CHECK20-NEXT: store i32 0, i32* [[RETVAL]], align 4 // CHECK20-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 // CHECK20-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 4 @@ -24522,308 +24244,274 @@ // CHECK20-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK20-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i32* // CHECK20-NEXT: store i32 [[TMP3]], i32* [[TMP9]], align 4 -// CHECK20-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK20-NEXT: store i64 4, i64* [[TMP10]], align 4 -// CHECK20-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK20-NEXT: store i8* null, i8** [[TMP11]], align 4 -// CHECK20-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK20-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32* -// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP13]], align 4 -// CHECK20-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK20-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32* -// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP15]], align 4 -// CHECK20-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK20-NEXT: store i64 4, i64* [[TMP16]], align 4 -// CHECK20-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK20-NEXT: store i8* null, i8** [[TMP17]], align 4 -// CHECK20-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK20-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* 
[[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK20-NEXT: store i8* null, i8** [[TMP10]], align 4 +// CHECK20-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK20-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i32* +// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP12]], align 4 +// CHECK20-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK20-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i32* +// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP14]], align 4 +// CHECK20-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK20-NEXT: store i8* null, i8** [[TMP15]], align 4 +// CHECK20-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK20-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** +// CHECK20-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 4 +// CHECK20-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK20-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** // CHECK20-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 4 -// CHECK20-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK20-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** -// CHECK20-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 4 -// CHECK20-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK20-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 4 -// CHECK20-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK20-NEXT: store i8* null, i8** [[TMP23]], align 4 -// CHECK20-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4 -// CHECK20-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK20-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK20-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 +// CHECK20-NEXT: store i64 [[TMP5]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 2), align 4 +// CHECK20-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK20-NEXT: store i8* null, i8** [[TMP20]], align 4 +// CHECK20-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP23:%.*]] = load i32, i32* [[N]], align 4 +// CHECK20-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK20-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK20-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK20-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK20-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK20-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK20-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK20-NEXT: [[ADD:%.*]] = add nsw 
i32 [[TMP29]], 1 -// CHECK20-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 -// CHECK20-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP30]]) -// CHECK20-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l148.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK20-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 -// CHECK20-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK20-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK20-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], 1 +// CHECK20-NEXT: [[TMP26:%.*]] = zext i32 [[ADD]] to i64 +// CHECK20-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP26]]) +// CHECK20-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l148.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK20-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK20-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK20: omp_offload.failed: // CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l148(i32 [[TMP3]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK20-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK20: omp_offload.cont: -// CHECK20-NEXT: [[TMP33:%.*]] = load i32, i32* [[N]], align 4 -// CHECK20-NEXT: store i32 [[TMP33]], i32* [[N_CASTED3]], align 4 -// CHECK20-NEXT: [[TMP34:%.*]] = load i32, i32* [[N_CASTED3]], align 4 -// CHECK20-NEXT: [[TMP35:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK20-NEXT: [[TMP36:%.*]] = sext i32 [[TMP35]] to i64 -// CHECK20-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i32* -// CHECK20-NEXT: store i32 [[TMP34]], i32* [[TMP38]], align 4 -// CHECK20-NEXT: [[TMP39:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i32* -// CHECK20-NEXT: store i32 [[TMP34]], i32* [[TMP40]], align 4 -// CHECK20-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 0 -// CHECK20-NEXT: store i64 4, i64* [[TMP41]], align 4 -// CHECK20-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP29:%.*]] = load i32, i32* [[N]], align 4 +// CHECK20-NEXT: store i32 [[TMP29]], i32* [[N_CASTED3]], align 4 +// CHECK20-NEXT: [[TMP30:%.*]] = load i32, i32* [[N_CASTED3]], align 4 +// CHECK20-NEXT: [[TMP31:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK20-NEXT: [[TMP32:%.*]] = sext i32 [[TMP31]] to i64 +// CHECK20-NEXT: [[TMP33:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i32* +// CHECK20-NEXT: store i32 [[TMP30]], i32* [[TMP34]], align 4 +// CHECK20-NEXT: [[TMP35:%.*]] = 
getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP36:%.*]] = bitcast i8** [[TMP35]] to i32* +// CHECK20-NEXT: store i32 [[TMP30]], i32* [[TMP36]], align 4 +// CHECK20-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0 +// CHECK20-NEXT: store i8* null, i8** [[TMP37]], align 4 +// CHECK20-NEXT: [[TMP38:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1 +// CHECK20-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i32* +// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP39]], align 4 +// CHECK20-NEXT: [[TMP40:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 1 +// CHECK20-NEXT: [[TMP41:%.*]] = bitcast i8** [[TMP40]] to i32* +// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP41]], align 4 +// CHECK20-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1 // CHECK20-NEXT: store i8* null, i8** [[TMP42]], align 4 -// CHECK20-NEXT: [[TMP43:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1 -// CHECK20-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32* -// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP44]], align 4 -// CHECK20-NEXT: [[TMP45:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 1 -// CHECK20-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32* -// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP46]], align 4 -// CHECK20-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 1 -// CHECK20-NEXT: store i64 4, i64* [[TMP47]], align 4 -// CHECK20-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1 -// CHECK20-NEXT: store i8* null, i8** [[TMP48]], align 4 -// CHECK20-NEXT: [[TMP49:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2 -// CHECK20-NEXT: [[TMP50:%.*]] = bitcast i8** [[TMP49]] to i32** -// CHECK20-NEXT: store i32* [[VLA]], i32** [[TMP50]], align 4 -// CHECK20-NEXT: [[TMP51:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 2 -// CHECK20-NEXT: [[TMP52:%.*]] = bitcast i8** [[TMP51]] to i32** -// CHECK20-NEXT: store i32* [[VLA]], i32** [[TMP52]], align 4 -// CHECK20-NEXT: [[TMP53:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 2 -// CHECK20-NEXT: store i64 [[TMP36]], i64* [[TMP53]], align 4 -// CHECK20-NEXT: [[TMP54:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 2 -// CHECK20-NEXT: store i8* null, i8** [[TMP54]], align 4 -// CHECK20-NEXT: [[TMP55:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP56:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP57:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP43:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2 +// CHECK20-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32** +// CHECK20-NEXT: store i32* [[VLA]], i32** [[TMP44]], align 4 +// CHECK20-NEXT: [[TMP45:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 2 +// CHECK20-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32** +// CHECK20-NEXT: store i32* [[VLA]], i32** [[TMP46]], align 4 
+// CHECK20-NEXT: store i64 [[TMP32]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 2), align 4 +// CHECK20-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 2 +// CHECK20-NEXT: store i8* null, i8** [[TMP47]], align 4 +// CHECK20-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP49:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP50:%.*]] = load i32, i32* [[N]], align 4 +// CHECK20-NEXT: store i32 [[TMP50]], i32* [[DOTCAPTURE_EXPR_8]], align 4 +// CHECK20-NEXT: [[TMP51:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_8]], align 4 +// CHECK20-NEXT: [[SUB10:%.*]] = sub nsw i32 [[TMP51]], 0 +// CHECK20-NEXT: [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1 +// CHECK20-NEXT: [[SUB12:%.*]] = sub nsw i32 [[DIV11]], 1 +// CHECK20-NEXT: store i32 [[SUB12]], i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK20-NEXT: [[TMP52:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK20-NEXT: [[ADD13:%.*]] = add nsw i32 [[TMP52]], 1 +// CHECK20-NEXT: [[TMP53:%.*]] = zext i32 [[ADD13]] to i64 +// CHECK20-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP53]]) +// CHECK20-NEXT: [[TMP54:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l153.region_id, i32 3, i8** [[TMP48]], i8** [[TMP49]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK20-NEXT: [[TMP55:%.*]] = icmp ne i32 [[TMP54]], 0 +// CHECK20-NEXT: br i1 [[TMP55]], label [[OMP_OFFLOAD_FAILED14:%.*]], label [[OMP_OFFLOAD_CONT15:%.*]] +// CHECK20: omp_offload.failed14: +// CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l153(i32 [[TMP30]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] +// CHECK20-NEXT: br label [[OMP_OFFLOAD_CONT15]] +// CHECK20: omp_offload.cont15: +// CHECK20-NEXT: [[TMP56:%.*]] = load i32, i32* [[M]], align 4 +// CHECK20-NEXT: store i32 [[TMP56]], i32* [[M_CASTED]], align 4 +// CHECK20-NEXT: [[TMP57:%.*]] = load i32, i32* [[M_CASTED]], align 4 // CHECK20-NEXT: [[TMP58:%.*]] = load i32, i32* [[N]], align 4 -// CHECK20-NEXT: store i32 [[TMP58]], i32* [[DOTCAPTURE_EXPR_9]], align 4 -// CHECK20-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 -// CHECK20-NEXT: [[SUB11:%.*]] = sub nsw i32 [[TMP59]], 0 -// CHECK20-NEXT: [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1 -// CHECK20-NEXT: [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1 -// CHECK20-NEXT: store i32 [[SUB13]], i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK20-NEXT: [[TMP60:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK20-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP60]], 1 -// CHECK20-NEXT: [[TMP61:%.*]] = zext i32 [[ADD14]] to i64 -// CHECK20-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP61]]) -// CHECK20-NEXT: [[TMP62:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l153.region_id, i32 3, i8** [[TMP55]], i8** [[TMP56]], i64* [[TMP57]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK20-NEXT: [[TMP63:%.*]] = icmp ne i32 
[[TMP62]], 0 -// CHECK20-NEXT: br i1 [[TMP63]], label [[OMP_OFFLOAD_FAILED15:%.*]], label [[OMP_OFFLOAD_CONT16:%.*]] -// CHECK20: omp_offload.failed15: -// CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l153(i32 [[TMP34]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] -// CHECK20-NEXT: br label [[OMP_OFFLOAD_CONT16]] -// CHECK20: omp_offload.cont16: -// CHECK20-NEXT: [[TMP64:%.*]] = load i32, i32* [[M]], align 4 -// CHECK20-NEXT: store i32 [[TMP64]], i32* [[M_CASTED]], align 4 -// CHECK20-NEXT: [[TMP65:%.*]] = load i32, i32* [[M_CASTED]], align 4 -// CHECK20-NEXT: [[TMP66:%.*]] = load i32, i32* [[N]], align 4 -// CHECK20-NEXT: store i32 [[TMP66]], i32* [[N_CASTED17]], align 4 -// CHECK20-NEXT: [[TMP67:%.*]] = load i32, i32* [[N_CASTED17]], align 4 -// CHECK20-NEXT: [[TMP68:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK20-NEXT: [[TMP69:%.*]] = sext i32 [[TMP68]] to i64 -// CHECK20-NEXT: [[TMP70:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP71:%.*]] = bitcast i8** [[TMP70]] to i32* -// CHECK20-NEXT: store i32 [[TMP65]], i32* [[TMP71]], align 4 -// CHECK20-NEXT: [[TMP72:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 0 +// CHECK20-NEXT: store i32 [[TMP58]], i32* [[N_CASTED16]], align 4 +// CHECK20-NEXT: [[TMP59:%.*]] = load i32, i32* [[N_CASTED16]], align 4 +// CHECK20-NEXT: [[TMP60:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK20-NEXT: [[TMP61:%.*]] = sext i32 [[TMP60]] to i64 +// CHECK20-NEXT: [[TMP62:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP63:%.*]] = bitcast i8** [[TMP62]] to i32* +// CHECK20-NEXT: store i32 [[TMP57]], i32* [[TMP63]], align 4 +// CHECK20-NEXT: [[TMP64:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP65:%.*]] = bitcast i8** [[TMP64]] to i32* +// CHECK20-NEXT: store i32 [[TMP57]], i32* [[TMP65]], align 4 +// CHECK20-NEXT: [[TMP66:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 0 +// CHECK20-NEXT: store i8* null, i8** [[TMP66]], align 4 +// CHECK20-NEXT: [[TMP67:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 1 +// CHECK20-NEXT: [[TMP68:%.*]] = bitcast i8** [[TMP67]] to i32* +// CHECK20-NEXT: store i32 [[TMP59]], i32* [[TMP68]], align 4 +// CHECK20-NEXT: [[TMP69:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 1 +// CHECK20-NEXT: [[TMP70:%.*]] = bitcast i8** [[TMP69]] to i32* +// CHECK20-NEXT: store i32 [[TMP59]], i32* [[TMP70]], align 4 +// CHECK20-NEXT: [[TMP71:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 1 +// CHECK20-NEXT: store i8* null, i8** [[TMP71]], align 4 +// CHECK20-NEXT: [[TMP72:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 2 // CHECK20-NEXT: [[TMP73:%.*]] = bitcast i8** [[TMP72]] to i32* -// CHECK20-NEXT: store i32 [[TMP65]], i32* [[TMP73]], align 4 -// CHECK20-NEXT: [[TMP74:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES21]], i32 0, i32 0 -// CHECK20-NEXT: store i64 4, i64* [[TMP74]], align 4 -// CHECK20-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 0 -// CHECK20-NEXT: store i8* null, i8** [[TMP75]], align 4 -// CHECK20-NEXT: [[TMP76:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, 
i32 1 -// CHECK20-NEXT: [[TMP77:%.*]] = bitcast i8** [[TMP76]] to i32* -// CHECK20-NEXT: store i32 [[TMP67]], i32* [[TMP77]], align 4 -// CHECK20-NEXT: [[TMP78:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 1 -// CHECK20-NEXT: [[TMP79:%.*]] = bitcast i8** [[TMP78]] to i32* -// CHECK20-NEXT: store i32 [[TMP67]], i32* [[TMP79]], align 4 -// CHECK20-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES21]], i32 0, i32 1 -// CHECK20-NEXT: store i64 4, i64* [[TMP80]], align 4 -// CHECK20-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 1 +// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP73]], align 4 +// CHECK20-NEXT: [[TMP74:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 2 +// CHECK20-NEXT: [[TMP75:%.*]] = bitcast i8** [[TMP74]] to i32* +// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP75]], align 4 +// CHECK20-NEXT: [[TMP76:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 2 +// CHECK20-NEXT: store i8* null, i8** [[TMP76]], align 4 +// CHECK20-NEXT: [[TMP77:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 3 +// CHECK20-NEXT: [[TMP78:%.*]] = bitcast i8** [[TMP77]] to i32** +// CHECK20-NEXT: store i32* [[VLA]], i32** [[TMP78]], align 4 +// CHECK20-NEXT: [[TMP79:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 3 +// CHECK20-NEXT: [[TMP80:%.*]] = bitcast i8** [[TMP79]] to i32** +// CHECK20-NEXT: store i32* [[VLA]], i32** [[TMP80]], align 4 +// CHECK20-NEXT: store i64 [[TMP61]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 3), align 4 +// CHECK20-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 3 // CHECK20-NEXT: store i8* null, i8** [[TMP81]], align 4 -// CHECK20-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 2 -// CHECK20-NEXT: [[TMP83:%.*]] = bitcast i8** [[TMP82]] to i32* -// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP83]], align 4 -// CHECK20-NEXT: [[TMP84:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 2 -// CHECK20-NEXT: [[TMP85:%.*]] = bitcast i8** [[TMP84]] to i32* -// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP85]], align 4 -// CHECK20-NEXT: [[TMP86:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES21]], i32 0, i32 2 -// CHECK20-NEXT: store i64 4, i64* [[TMP86]], align 4 -// CHECK20-NEXT: [[TMP87:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 2 -// CHECK20-NEXT: store i8* null, i8** [[TMP87]], align 4 -// CHECK20-NEXT: [[TMP88:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 3 -// CHECK20-NEXT: [[TMP89:%.*]] = bitcast i8** [[TMP88]] to i32** -// CHECK20-NEXT: store i32* [[VLA]], i32** [[TMP89]], align 4 -// CHECK20-NEXT: [[TMP90:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 3 -// CHECK20-NEXT: [[TMP91:%.*]] = bitcast i8** [[TMP90]] to i32** -// CHECK20-NEXT: store i32* [[VLA]], i32** [[TMP91]], align 4 -// CHECK20-NEXT: [[TMP92:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES21]], i32 0, i32 3 -// CHECK20-NEXT: store i64 [[TMP69]], i64* [[TMP92]], align 4 -// CHECK20-NEXT: [[TMP93:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* 
[[DOTOFFLOAD_MAPPERS20]], i32 0, i32 3 -// CHECK20-NEXT: store i8* null, i8** [[TMP93]], align 4 -// CHECK20-NEXT: [[TMP94:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP95:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP96:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES21]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP97:%.*]] = load i32, i32* [[N]], align 4 -// CHECK20-NEXT: store i32 [[TMP97]], i32* [[DOTCAPTURE_EXPR_23]], align 4 -// CHECK20-NEXT: [[TMP98:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_23]], align 4 -// CHECK20-NEXT: [[SUB25:%.*]] = sub nsw i32 [[TMP98]], 0 -// CHECK20-NEXT: [[DIV26:%.*]] = sdiv i32 [[SUB25]], 1 -// CHECK20-NEXT: [[SUB27:%.*]] = sub nsw i32 [[DIV26]], 1 -// CHECK20-NEXT: store i32 [[SUB27]], i32* [[DOTCAPTURE_EXPR_24]], align 4 -// CHECK20-NEXT: [[TMP99:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_24]], align 4 -// CHECK20-NEXT: [[ADD28:%.*]] = add nsw i32 [[TMP99]], 1 -// CHECK20-NEXT: [[TMP100:%.*]] = zext i32 [[ADD28]] to i64 -// CHECK20-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP100]]) -// CHECK20-NEXT: [[TMP101:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l158.region_id, i32 4, i8** [[TMP94]], i8** [[TMP95]], i64* [[TMP96]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.7, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK20-NEXT: [[TMP102:%.*]] = icmp ne i32 [[TMP101]], 0 -// CHECK20-NEXT: br i1 [[TMP102]], label [[OMP_OFFLOAD_FAILED29:%.*]], label [[OMP_OFFLOAD_CONT30:%.*]] -// CHECK20: omp_offload.failed29: -// CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l158(i32 [[TMP65]], i32 [[TMP67]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] -// CHECK20-NEXT: br label [[OMP_OFFLOAD_CONT30]] -// CHECK20: omp_offload.cont30: -// CHECK20-NEXT: [[TMP103:%.*]] = load i32, i32* [[N]], align 4 -// CHECK20-NEXT: store i32 [[TMP103]], i32* [[N_CASTED31]], align 4 -// CHECK20-NEXT: [[TMP104:%.*]] = load i32, i32* [[N_CASTED31]], align 4 -// CHECK20-NEXT: [[TMP105:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK20-NEXT: [[TMP106:%.*]] = sext i32 [[TMP105]] to i64 -// CHECK20-NEXT: [[TMP107:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS32]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP108:%.*]] = bitcast i8** [[TMP107]] to i32* -// CHECK20-NEXT: store i32 [[TMP104]], i32* [[TMP108]], align 4 -// CHECK20-NEXT: [[TMP109:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS33]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP110:%.*]] = bitcast i8** [[TMP109]] to i32* -// CHECK20-NEXT: store i32 [[TMP104]], i32* [[TMP110]], align 4 -// CHECK20-NEXT: [[TMP111:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES35]], i32 0, i32 0 -// CHECK20-NEXT: store i64 4, i64* [[TMP111]], align 4 -// CHECK20-NEXT: [[TMP112:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS34]], i32 0, i32 0 -// CHECK20-NEXT: store i8* null, i8** [[TMP112]], align 4 -// CHECK20-NEXT: [[TMP113:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS32]], i32 0, i32 1 -// CHECK20-NEXT: [[TMP114:%.*]] = bitcast i8** [[TMP113]] to i32* -// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP114]], align 4 -// CHECK20-NEXT: [[TMP115:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* 
[[DOTOFFLOAD_PTRS33]], i32 0, i32 1 -// CHECK20-NEXT: [[TMP116:%.*]] = bitcast i8** [[TMP115]] to i32* -// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP116]], align 4 -// CHECK20-NEXT: [[TMP117:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES35]], i32 0, i32 1 -// CHECK20-NEXT: store i64 4, i64* [[TMP117]], align 4 -// CHECK20-NEXT: [[TMP118:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS34]], i32 0, i32 1 -// CHECK20-NEXT: store i8* null, i8** [[TMP118]], align 4 -// CHECK20-NEXT: [[TMP119:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS32]], i32 0, i32 2 -// CHECK20-NEXT: [[TMP120:%.*]] = bitcast i8** [[TMP119]] to i32** -// CHECK20-NEXT: store i32* [[VLA]], i32** [[TMP120]], align 4 -// CHECK20-NEXT: [[TMP121:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS33]], i32 0, i32 2 -// CHECK20-NEXT: [[TMP122:%.*]] = bitcast i8** [[TMP121]] to i32** -// CHECK20-NEXT: store i32* [[VLA]], i32** [[TMP122]], align 4 -// CHECK20-NEXT: [[TMP123:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES35]], i32 0, i32 2 -// CHECK20-NEXT: store i64 [[TMP106]], i64* [[TMP123]], align 4 -// CHECK20-NEXT: [[TMP124:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS34]], i32 0, i32 2 -// CHECK20-NEXT: store i8* null, i8** [[TMP124]], align 4 -// CHECK20-NEXT: [[TMP125:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS32]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP126:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS33]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP127:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES35]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP128:%.*]] = load i32, i32* [[N]], align 4 -// CHECK20-NEXT: store i32 [[TMP128]], i32* [[DOTCAPTURE_EXPR_37]], align 4 -// CHECK20-NEXT: [[TMP129:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_37]], align 4 -// CHECK20-NEXT: [[SUB39:%.*]] = sub nsw i32 [[TMP129]], 0 -// CHECK20-NEXT: [[DIV40:%.*]] = sdiv i32 [[SUB39]], 1 -// CHECK20-NEXT: [[SUB41:%.*]] = sub nsw i32 [[DIV40]], 1 -// CHECK20-NEXT: store i32 [[SUB41]], i32* [[DOTCAPTURE_EXPR_38]], align 4 -// CHECK20-NEXT: [[TMP130:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_38]], align 4 -// CHECK20-NEXT: [[ADD42:%.*]] = add nsw i32 [[TMP130]], 1 -// CHECK20-NEXT: [[TMP131:%.*]] = zext i32 [[ADD42]] to i64 -// CHECK20-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP131]]) -// CHECK20-NEXT: [[TMP132:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l163.region_id, i32 3, i8** [[TMP125]], i8** [[TMP126]], i64* [[TMP127]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK20-NEXT: [[TMP133:%.*]] = icmp ne i32 [[TMP132]], 0 -// CHECK20-NEXT: br i1 [[TMP133]], label [[OMP_OFFLOAD_FAILED43:%.*]], label [[OMP_OFFLOAD_CONT44:%.*]] -// CHECK20: omp_offload.failed43: -// CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l163(i32 [[TMP104]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] -// CHECK20-NEXT: br label [[OMP_OFFLOAD_CONT44]] -// CHECK20: omp_offload.cont44: -// CHECK20-NEXT: [[TMP134:%.*]] = load i32, i32* [[M]], align 4 -// CHECK20-NEXT: store i32 [[TMP134]], i32* [[M_CASTED45]], align 4 -// CHECK20-NEXT: [[TMP135:%.*]] = load i32, i32* [[M_CASTED45]], align 4 -// CHECK20-NEXT: [[TMP136:%.*]] = load 
i32, i32* [[N]], align 4 -// CHECK20-NEXT: store i32 [[TMP136]], i32* [[N_CASTED46]], align 4 -// CHECK20-NEXT: [[TMP137:%.*]] = load i32, i32* [[N_CASTED46]], align 4 -// CHECK20-NEXT: [[TMP138:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK20-NEXT: [[TMP139:%.*]] = sext i32 [[TMP138]] to i64 -// CHECK20-NEXT: [[TMP140:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS47]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP141:%.*]] = bitcast i8** [[TMP140]] to i32* -// CHECK20-NEXT: store i32 [[TMP135]], i32* [[TMP141]], align 4 -// CHECK20-NEXT: [[TMP142:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS48]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP143:%.*]] = bitcast i8** [[TMP142]] to i32* -// CHECK20-NEXT: store i32 [[TMP135]], i32* [[TMP143]], align 4 -// CHECK20-NEXT: [[TMP144:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES50]], i32 0, i32 0 -// CHECK20-NEXT: store i64 4, i64* [[TMP144]], align 4 -// CHECK20-NEXT: [[TMP145:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS49]], i32 0, i32 0 -// CHECK20-NEXT: store i8* null, i8** [[TMP145]], align 4 -// CHECK20-NEXT: [[TMP146:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS47]], i32 0, i32 1 -// CHECK20-NEXT: [[TMP147:%.*]] = bitcast i8** [[TMP146]] to i32* -// CHECK20-NEXT: store i32 [[TMP137]], i32* [[TMP147]], align 4 -// CHECK20-NEXT: [[TMP148:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS48]], i32 0, i32 1 -// CHECK20-NEXT: [[TMP149:%.*]] = bitcast i8** [[TMP148]] to i32* -// CHECK20-NEXT: store i32 [[TMP137]], i32* [[TMP149]], align 4 -// CHECK20-NEXT: [[TMP150:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES50]], i32 0, i32 1 -// CHECK20-NEXT: store i64 4, i64* [[TMP150]], align 4 -// CHECK20-NEXT: [[TMP151:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS49]], i32 0, i32 1 -// CHECK20-NEXT: store i8* null, i8** [[TMP151]], align 4 -// CHECK20-NEXT: [[TMP152:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS47]], i32 0, i32 2 -// CHECK20-NEXT: [[TMP153:%.*]] = bitcast i8** [[TMP152]] to i32* -// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP153]], align 4 -// CHECK20-NEXT: [[TMP154:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS48]], i32 0, i32 2 -// CHECK20-NEXT: [[TMP155:%.*]] = bitcast i8** [[TMP154]] to i32* -// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP155]], align 4 -// CHECK20-NEXT: [[TMP156:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES50]], i32 0, i32 2 -// CHECK20-NEXT: store i64 4, i64* [[TMP156]], align 4 -// CHECK20-NEXT: [[TMP157:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS49]], i32 0, i32 2 -// CHECK20-NEXT: store i8* null, i8** [[TMP157]], align 4 -// CHECK20-NEXT: [[TMP158:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS47]], i32 0, i32 3 -// CHECK20-NEXT: [[TMP159:%.*]] = bitcast i8** [[TMP158]] to i32** -// CHECK20-NEXT: store i32* [[VLA]], i32** [[TMP159]], align 4 -// CHECK20-NEXT: [[TMP160:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS48]], i32 0, i32 3 -// CHECK20-NEXT: [[TMP161:%.*]] = bitcast i8** [[TMP160]] to i32** -// CHECK20-NEXT: store i32* [[VLA]], i32** [[TMP161]], align 4 -// CHECK20-NEXT: [[TMP162:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES50]], i32 0, i32 3 -// CHECK20-NEXT: store i64 [[TMP139]], i64* [[TMP162]], align 4 -// CHECK20-NEXT: [[TMP163:%.*]] = 
getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS49]], i32 0, i32 3 -// CHECK20-NEXT: store i8* null, i8** [[TMP163]], align 4 -// CHECK20-NEXT: [[TMP164:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS47]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP165:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS48]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP166:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES50]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP167:%.*]] = load i32, i32* [[N]], align 4 -// CHECK20-NEXT: store i32 [[TMP167]], i32* [[DOTCAPTURE_EXPR_52]], align 4 -// CHECK20-NEXT: [[TMP168:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_52]], align 4 -// CHECK20-NEXT: [[SUB54:%.*]] = sub nsw i32 [[TMP168]], 0 -// CHECK20-NEXT: [[DIV55:%.*]] = sdiv i32 [[SUB54]], 1 -// CHECK20-NEXT: [[SUB56:%.*]] = sub nsw i32 [[DIV55]], 1 -// CHECK20-NEXT: store i32 [[SUB56]], i32* [[DOTCAPTURE_EXPR_53]], align 4 -// CHECK20-NEXT: [[TMP169:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_53]], align 4 -// CHECK20-NEXT: [[ADD57:%.*]] = add nsw i32 [[TMP169]], 1 -// CHECK20-NEXT: [[TMP170:%.*]] = zext i32 [[ADD57]] to i64 -// CHECK20-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP170]]) -// CHECK20-NEXT: [[TMP171:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l168.region_id, i32 4, i8** [[TMP164]], i8** [[TMP165]], i64* [[TMP166]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK20-NEXT: [[TMP172:%.*]] = icmp ne i32 [[TMP171]], 0 -// CHECK20-NEXT: br i1 [[TMP172]], label [[OMP_OFFLOAD_FAILED58:%.*]], label [[OMP_OFFLOAD_CONT59:%.*]] -// CHECK20: omp_offload.failed58: -// CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l168(i32 [[TMP135]], i32 [[TMP137]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] -// CHECK20-NEXT: br label [[OMP_OFFLOAD_CONT59]] -// CHECK20: omp_offload.cont59: -// CHECK20-NEXT: [[TMP173:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 -// CHECK20-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP173]]) +// CHECK20-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP83:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP84:%.*]] = load i32, i32* [[N]], align 4 +// CHECK20-NEXT: store i32 [[TMP84]], i32* [[DOTCAPTURE_EXPR_21]], align 4 +// CHECK20-NEXT: [[TMP85:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_21]], align 4 +// CHECK20-NEXT: [[SUB23:%.*]] = sub nsw i32 [[TMP85]], 0 +// CHECK20-NEXT: [[DIV24:%.*]] = sdiv i32 [[SUB23]], 1 +// CHECK20-NEXT: [[SUB25:%.*]] = sub nsw i32 [[DIV24]], 1 +// CHECK20-NEXT: store i32 [[SUB25]], i32* [[DOTCAPTURE_EXPR_22]], align 4 +// CHECK20-NEXT: [[TMP86:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_22]], align 4 +// CHECK20-NEXT: [[ADD26:%.*]] = add nsw i32 [[TMP86]], 1 +// CHECK20-NEXT: [[TMP87:%.*]] = zext i32 [[ADD26]] to i64 +// CHECK20-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP87]]) +// CHECK20-NEXT: [[TMP88:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l158.region_id, i32 4, i8** [[TMP82]], i8** [[TMP83]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* 
@.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK20-NEXT: [[TMP89:%.*]] = icmp ne i32 [[TMP88]], 0 +// CHECK20-NEXT: br i1 [[TMP89]], label [[OMP_OFFLOAD_FAILED27:%.*]], label [[OMP_OFFLOAD_CONT28:%.*]] +// CHECK20: omp_offload.failed27: +// CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l158(i32 [[TMP57]], i32 [[TMP59]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] +// CHECK20-NEXT: br label [[OMP_OFFLOAD_CONT28]] +// CHECK20: omp_offload.cont28: +// CHECK20-NEXT: [[TMP90:%.*]] = load i32, i32* [[N]], align 4 +// CHECK20-NEXT: store i32 [[TMP90]], i32* [[N_CASTED29]], align 4 +// CHECK20-NEXT: [[TMP91:%.*]] = load i32, i32* [[N_CASTED29]], align 4 +// CHECK20-NEXT: [[TMP92:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK20-NEXT: [[TMP93:%.*]] = sext i32 [[TMP92]] to i64 +// CHECK20-NEXT: [[TMP94:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS30]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP95:%.*]] = bitcast i8** [[TMP94]] to i32* +// CHECK20-NEXT: store i32 [[TMP91]], i32* [[TMP95]], align 4 +// CHECK20-NEXT: [[TMP96:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS31]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP97:%.*]] = bitcast i8** [[TMP96]] to i32* +// CHECK20-NEXT: store i32 [[TMP91]], i32* [[TMP97]], align 4 +// CHECK20-NEXT: [[TMP98:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS32]], i32 0, i32 0 +// CHECK20-NEXT: store i8* null, i8** [[TMP98]], align 4 +// CHECK20-NEXT: [[TMP99:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS30]], i32 0, i32 1 +// CHECK20-NEXT: [[TMP100:%.*]] = bitcast i8** [[TMP99]] to i32* +// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP100]], align 4 +// CHECK20-NEXT: [[TMP101:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS31]], i32 0, i32 1 +// CHECK20-NEXT: [[TMP102:%.*]] = bitcast i8** [[TMP101]] to i32* +// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP102]], align 4 +// CHECK20-NEXT: [[TMP103:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS32]], i32 0, i32 1 +// CHECK20-NEXT: store i8* null, i8** [[TMP103]], align 4 +// CHECK20-NEXT: [[TMP104:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS30]], i32 0, i32 2 +// CHECK20-NEXT: [[TMP105:%.*]] = bitcast i8** [[TMP104]] to i32** +// CHECK20-NEXT: store i32* [[VLA]], i32** [[TMP105]], align 4 +// CHECK20-NEXT: [[TMP106:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS31]], i32 0, i32 2 +// CHECK20-NEXT: [[TMP107:%.*]] = bitcast i8** [[TMP106]] to i32** +// CHECK20-NEXT: store i32* [[VLA]], i32** [[TMP107]], align 4 +// CHECK20-NEXT: store i64 [[TMP93]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.12, i32 0, i32 2), align 4 +// CHECK20-NEXT: [[TMP108:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS32]], i32 0, i32 2 +// CHECK20-NEXT: store i8* null, i8** [[TMP108]], align 4 +// CHECK20-NEXT: [[TMP109:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS30]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP110:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS31]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP111:%.*]] = load i32, i32* [[N]], align 4 +// CHECK20-NEXT: store i32 [[TMP111]], i32* [[DOTCAPTURE_EXPR_34]], align 4 +// CHECK20-NEXT: [[TMP112:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_34]], align 4 +// CHECK20-NEXT: [[SUB36:%.*]] = 
sub nsw i32 [[TMP112]], 0 +// CHECK20-NEXT: [[DIV37:%.*]] = sdiv i32 [[SUB36]], 1 +// CHECK20-NEXT: [[SUB38:%.*]] = sub nsw i32 [[DIV37]], 1 +// CHECK20-NEXT: store i32 [[SUB38]], i32* [[DOTCAPTURE_EXPR_35]], align 4 +// CHECK20-NEXT: [[TMP113:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_35]], align 4 +// CHECK20-NEXT: [[ADD39:%.*]] = add nsw i32 [[TMP113]], 1 +// CHECK20-NEXT: [[TMP114:%.*]] = zext i32 [[ADD39]] to i64 +// CHECK20-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP114]]) +// CHECK20-NEXT: [[TMP115:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l163.region_id, i32 3, i8** [[TMP109]], i8** [[TMP110]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK20-NEXT: [[TMP116:%.*]] = icmp ne i32 [[TMP115]], 0 +// CHECK20-NEXT: br i1 [[TMP116]], label [[OMP_OFFLOAD_FAILED40:%.*]], label [[OMP_OFFLOAD_CONT41:%.*]] +// CHECK20: omp_offload.failed40: +// CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l163(i32 [[TMP91]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] +// CHECK20-NEXT: br label [[OMP_OFFLOAD_CONT41]] +// CHECK20: omp_offload.cont41: +// CHECK20-NEXT: [[TMP117:%.*]] = load i32, i32* [[M]], align 4 +// CHECK20-NEXT: store i32 [[TMP117]], i32* [[M_CASTED42]], align 4 +// CHECK20-NEXT: [[TMP118:%.*]] = load i32, i32* [[M_CASTED42]], align 4 +// CHECK20-NEXT: [[TMP119:%.*]] = load i32, i32* [[N]], align 4 +// CHECK20-NEXT: store i32 [[TMP119]], i32* [[N_CASTED43]], align 4 +// CHECK20-NEXT: [[TMP120:%.*]] = load i32, i32* [[N_CASTED43]], align 4 +// CHECK20-NEXT: [[TMP121:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK20-NEXT: [[TMP122:%.*]] = sext i32 [[TMP121]] to i64 +// CHECK20-NEXT: [[TMP123:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP124:%.*]] = bitcast i8** [[TMP123]] to i32* +// CHECK20-NEXT: store i32 [[TMP118]], i32* [[TMP124]], align 4 +// CHECK20-NEXT: [[TMP125:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP126:%.*]] = bitcast i8** [[TMP125]] to i32* +// CHECK20-NEXT: store i32 [[TMP118]], i32* [[TMP126]], align 4 +// CHECK20-NEXT: [[TMP127:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS46]], i32 0, i32 0 +// CHECK20-NEXT: store i8* null, i8** [[TMP127]], align 4 +// CHECK20-NEXT: [[TMP128:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 1 +// CHECK20-NEXT: [[TMP129:%.*]] = bitcast i8** [[TMP128]] to i32* +// CHECK20-NEXT: store i32 [[TMP120]], i32* [[TMP129]], align 4 +// CHECK20-NEXT: [[TMP130:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 1 +// CHECK20-NEXT: [[TMP131:%.*]] = bitcast i8** [[TMP130]] to i32* +// CHECK20-NEXT: store i32 [[TMP120]], i32* [[TMP131]], align 4 +// CHECK20-NEXT: [[TMP132:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS46]], i32 0, i32 1 +// CHECK20-NEXT: store i8* null, i8** [[TMP132]], align 4 +// CHECK20-NEXT: [[TMP133:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 2 +// CHECK20-NEXT: [[TMP134:%.*]] = bitcast i8** [[TMP133]] to i32* +// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP134]], align 4 +// CHECK20-NEXT: 
[[TMP135:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 2 +// CHECK20-NEXT: [[TMP136:%.*]] = bitcast i8** [[TMP135]] to i32* +// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP136]], align 4 +// CHECK20-NEXT: [[TMP137:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS46]], i32 0, i32 2 +// CHECK20-NEXT: store i8* null, i8** [[TMP137]], align 4 +// CHECK20-NEXT: [[TMP138:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 3 +// CHECK20-NEXT: [[TMP139:%.*]] = bitcast i8** [[TMP138]] to i32** +// CHECK20-NEXT: store i32* [[VLA]], i32** [[TMP139]], align 4 +// CHECK20-NEXT: [[TMP140:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 3 +// CHECK20-NEXT: [[TMP141:%.*]] = bitcast i8** [[TMP140]] to i32** +// CHECK20-NEXT: store i32* [[VLA]], i32** [[TMP141]], align 4 +// CHECK20-NEXT: store i64 [[TMP122]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.16, i32 0, i32 3), align 4 +// CHECK20-NEXT: [[TMP142:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS46]], i32 0, i32 3 +// CHECK20-NEXT: store i8* null, i8** [[TMP142]], align 4 +// CHECK20-NEXT: [[TMP143:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP144:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP145:%.*]] = load i32, i32* [[N]], align 4 +// CHECK20-NEXT: store i32 [[TMP145]], i32* [[DOTCAPTURE_EXPR_48]], align 4 +// CHECK20-NEXT: [[TMP146:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_48]], align 4 +// CHECK20-NEXT: [[SUB50:%.*]] = sub nsw i32 [[TMP146]], 0 +// CHECK20-NEXT: [[DIV51:%.*]] = sdiv i32 [[SUB50]], 1 +// CHECK20-NEXT: [[SUB52:%.*]] = sub nsw i32 [[DIV51]], 1 +// CHECK20-NEXT: store i32 [[SUB52]], i32* [[DOTCAPTURE_EXPR_49]], align 4 +// CHECK20-NEXT: [[TMP147:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_49]], align 4 +// CHECK20-NEXT: [[ADD53:%.*]] = add nsw i32 [[TMP147]], 1 +// CHECK20-NEXT: [[TMP148:%.*]] = zext i32 [[ADD53]] to i64 +// CHECK20-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP148]]) +// CHECK20-NEXT: [[TMP149:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l168.region_id, i32 4, i8** [[TMP143]], i8** [[TMP144]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.16, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK20-NEXT: [[TMP150:%.*]] = icmp ne i32 [[TMP149]], 0 +// CHECK20-NEXT: br i1 [[TMP150]], label [[OMP_OFFLOAD_FAILED54:%.*]], label [[OMP_OFFLOAD_CONT55:%.*]] +// CHECK20: omp_offload.failed54: +// CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l168(i32 [[TMP118]], i32 [[TMP120]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] +// CHECK20-NEXT: br label [[OMP_OFFLOAD_CONT55]] +// CHECK20: omp_offload.cont55: +// CHECK20-NEXT: [[TMP151:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 +// CHECK20-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP151]]) // CHECK20-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK20-NEXT: [[TMP174:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK20-NEXT: call void @llvm.stackrestore(i8* [[TMP174]]) -// CHECK20-NEXT: [[TMP175:%.*]] = load i32, i32* [[RETVAL]], align 4 -// 
CHECK20-NEXT: ret i32 [[TMP175]] +// CHECK20-NEXT: [[TMP152:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK20-NEXT: call void @llvm.stackrestore(i8* [[TMP152]]) +// CHECK20-NEXT: [[TMP153:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK20-NEXT: ret i32 [[TMP153]] // // // CHECK20-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l148 @@ -25258,11 +24946,11 @@ // CHECK20-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 // CHECK20-NEXT: store i32 [[TMP3]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK20-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32, i32*, i32)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP4]]) +// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32, i32*, i32)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP4]]) // CHECK20-NEXT: ret void // // -// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..5 +// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..6 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK20-NEXT: entry: // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -25340,7 +25028,7 @@ // CHECK20-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK20-NEXT: store i32 [[TMP19]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK20-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32, i32*, i32)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], i32 [[TMP1]], i32* [[TMP2]], i32 [[TMP20]]) +// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32, i32*, i32)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], i32 [[TMP1]], i32* [[TMP2]], i32 [[TMP20]]) // CHECK20-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK20: omp.inner.for.inc: // CHECK20-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -25382,7 +25070,7 @@ // CHECK20-NEXT: ret void // // -// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..6 +// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..7 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK20-NEXT: entry: // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -25497,11 +25185,11 @@ // CHECK20-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4 // CHECK20-NEXT: [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4 // CHECK20-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4 -// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32, i32*)* @.omp_outlined..8 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32 [[TMP0]], i32* [[TMP1]]) +// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32, i32*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32 [[TMP0]], i32* [[TMP1]]) // CHECK20-NEXT: ret void // // -// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..8 +// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..10 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] { // CHECK20-NEXT: entry: // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -25571,7 +25259,7 @@ // CHECK20: omp.inner.for.body: // CHECK20-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 // CHECK20-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 -// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32, i32*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), i32 [[TMP16]], i32 [[TMP17]], i32* [[TMP0]], i32 [[TMP1]], i32* [[TMP2]]) +// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32, i32*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP16]], i32 [[TMP17]], i32* [[TMP0]], i32 [[TMP1]], i32* [[TMP2]]) // CHECK20-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK20: omp.inner.for.inc: // CHECK20-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -25590,7 +25278,7 @@ // CHECK20-NEXT: ret void // // -// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..9 +// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..11 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] { // CHECK20-NEXT: entry: // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -25708,11 +25396,11 @@ // CHECK20-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 // CHECK20-NEXT: store i32 [[TMP3]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK20-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32, i32*, i32)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP4]]) +// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32, i32*, i32)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP4]]) // CHECK20-NEXT: ret void // // -// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..14 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK20-NEXT: entry: // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -25788,7 +25476,7 @@ // CHECK20-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK20-NEXT: store i32 [[TMP18]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK20-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32, i32*, i32)* @.omp_outlined..12 to void (i32*, i32*, ...)*), i32 [[TMP16]], i32 [[TMP17]], i32* [[TMP0]], i32 [[TMP1]], i32* [[TMP2]], i32 [[TMP19]]) +// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32, i32*, i32)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i32 [[TMP16]], i32 [[TMP17]], i32* [[TMP0]], i32 [[TMP1]], i32* [[TMP2]], i32 [[TMP19]]) // CHECK20-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK20: omp.inner.for.inc: // CHECK20-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -25807,7 +25495,7 @@ // CHECK20-NEXT: ret void // // -// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..12 +// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..15 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK20-NEXT: entry: // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -25949,7 +25637,7 @@ // CHECK20-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK20-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK20-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK20-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK20-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l116.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.20, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.21, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK20-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK20-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK20: omp_offload.failed: @@ -25967,7 +25655,7 @@ // CHECK20-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0 // CHECK20-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0 // CHECK20-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK20-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l121.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.19, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.20, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK20-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l121.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x 
i64]* @.offload_sizes.24, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.25, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK20-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 // CHECK20-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]] // CHECK20: omp_offload.failed5: @@ -25996,7 +25684,7 @@ // CHECK20-NEXT: [[TMP30:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0 // CHECK20-NEXT: [[TMP31:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0 // CHECK20-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK20-NEXT: [[TMP32:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l126.region_id, i32 2, i8** [[TMP30]], i8** [[TMP31]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.23, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.24, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK20-NEXT: [[TMP32:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l126.region_id, i32 2, i8** [[TMP30]], i8** [[TMP31]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.28, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.29, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK20-NEXT: [[TMP33:%.*]] = icmp ne i32 [[TMP32]], 0 // CHECK20-NEXT: br i1 [[TMP33]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]] // CHECK20: omp_offload.failed11: @@ -26014,7 +25702,7 @@ // CHECK20-NEXT: [[TMP39:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 0 // CHECK20-NEXT: [[TMP40:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 0 // CHECK20-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK20-NEXT: [[TMP41:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l131.region_id, i32 1, i8** [[TMP39]], i8** [[TMP40]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.27, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.28, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK20-NEXT: [[TMP41:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l131.region_id, i32 1, i8** [[TMP39]], i8** [[TMP40]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.32, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.33, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK20-NEXT: [[TMP42:%.*]] = icmp ne i32 [[TMP41]], 0 // CHECK20-NEXT: br i1 [[TMP42]], label [[OMP_OFFLOAD_FAILED17:%.*]], label [[OMP_OFFLOAD_CONT18:%.*]] // CHECK20: omp_offload.failed17: @@ -26043,7 +25731,7 @@ // CHECK20-NEXT: [[TMP55:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0 // CHECK20-NEXT: [[TMP56:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0 // CHECK20-NEXT: call void 
@__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK20-NEXT: [[TMP57:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l136.region_id, i32 2, i8** [[TMP55]], i8** [[TMP56]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.31, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.32, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK20-NEXT: [[TMP57:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l136.region_id, i32 2, i8** [[TMP55]], i8** [[TMP56]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.36, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.37, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK20-NEXT: [[TMP58:%.*]] = icmp ne i32 [[TMP57]], 0 // CHECK20-NEXT: br i1 [[TMP58]], label [[OMP_OFFLOAD_FAILED24:%.*]], label [[OMP_OFFLOAD_CONT25:%.*]] // CHECK20: omp_offload.failed24: @@ -26059,11 +25747,11 @@ // CHECK20-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK20-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 // CHECK20-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 -// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK20-NEXT: ret void // // -// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..14 +// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..18 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK20-NEXT: entry: // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -26109,7 +25797,7 @@ // CHECK20: omp.inner.for.body: // CHECK20-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 // CHECK20-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 -// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]) +// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..19 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]) // CHECK20-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK20: omp.inner.for.inc: // CHECK20-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -26124,7 +25812,7 @@ // CHECK20-NEXT: ret void // // -// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..15 +// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..19 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK20-NEXT: entry: // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -26204,11 +25892,11 @@ // CHECK20-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK20-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 // CHECK20-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 -// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..17 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..22 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK20-NEXT: ret void // // -// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..17 +// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..22 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK20-NEXT: entry: // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -26254,7 +25942,7 @@ // CHECK20: omp.inner.for.body: // CHECK20-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 // CHECK20-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 -// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]) +// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..23 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]) // CHECK20-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK20: omp.inner.for.inc: // CHECK20-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -26269,7 +25957,7 @@ // CHECK20-NEXT: ret void // // -// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..18 +// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..23 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK20-NEXT: entry: // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -26358,11 +26046,11 @@ // CHECK20-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 // CHECK20-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK20-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..21 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP3]]) +// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..26 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP3]]) // CHECK20-NEXT: ret void // // -// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..21 +// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..26 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK20-NEXT: entry: // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -26414,7 +26102,7 @@ // CHECK20-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK20-NEXT: store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK20-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..22 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]]) +// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..27 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]]) // CHECK20-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK20: omp.inner.for.inc: // CHECK20-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -26429,7 +26117,7 @@ // CHECK20-NEXT: ret void // // -// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..22 +// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..27 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK20-NEXT: entry: // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -26531,11 +26219,11 @@ // CHECK20-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK20-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 // CHECK20-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 -// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..25 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..30 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK20-NEXT: ret void // // -// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..25 +// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..30 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK20-NEXT: entry: // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -26581,7 +26269,7 @@ // CHECK20: omp.inner.for.body: // CHECK20-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4 // CHECK20-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4 -// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..26 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]) +// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..31 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]) // CHECK20-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK20: omp.inner.for.inc: // CHECK20-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -26596,7 +26284,7 @@ // CHECK20-NEXT: ret void // // -// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..26 +// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..31 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK20-NEXT: entry: // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -26683,11 +26371,11 @@ // CHECK20-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 // CHECK20-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK20-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..29 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP3]]) +// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..34 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP3]]) // CHECK20-NEXT: ret void // // -// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..29 +// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..34 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK20-NEXT: entry: // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -26739,7 +26427,7 @@ // CHECK20-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4 // CHECK20-NEXT: store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK20-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..30 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]]) +// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..35 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]]) // CHECK20-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK20: omp.inner.for.inc: // CHECK20-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4 @@ -26754,7 +26442,7 @@ // CHECK20-NEXT: ret void // // -// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..30 +// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..35 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK20-NEXT: entry: // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 diff --git a/clang/test/OpenMP/teams_distribute_parallel_for_simd_codegen.cpp b/clang/test/OpenMP/teams_distribute_parallel_for_simd_codegen.cpp --- a/clang/test/OpenMP/teams_distribute_parallel_for_simd_codegen.cpp +++ b/clang/test/OpenMP/teams_distribute_parallel_for_simd_codegen.cpp @@ -3277,7 +3277,6 @@ // CHECK9-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x i8*], align 8 // CHECK9-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x i8*], align 8 // CHECK9-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x i8*], align 8 -// CHECK9-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [4 x i64], align 8 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4 @@ -3303,66 +3302,58 @@ // CHECK9-NEXT: [[TMP10:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK9-NEXT: [[TMP11:%.*]] = bitcast i8** [[TMP10]] to i64* // CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP11]], align 8 -// CHECK9-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK9-NEXT: store i64 8, i64* [[TMP12]], align 8 -// CHECK9-NEXT: [[TMP13:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK9-NEXT: store i8* null, i8** [[TMP13]], align 8 -// CHECK9-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK9-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32** -// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP15]], align 8 -// CHECK9-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK9-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** -// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 8 -// CHECK9-NEXT: [[TMP18:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK9-NEXT: store i64 [[TMP7]], i64* [[TMP18]], align 8 -// CHECK9-NEXT: [[TMP19:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK9-NEXT: store i8* null, i8** [[TMP19]], align 8 -// CHECK9-NEXT: [[TMP20:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK9-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK9-NEXT: store i8* null, i8** [[TMP12]], align 8 +// CHECK9-NEXT: [[TMP13:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 
+// CHECK9-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i32** +// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP14]], align 8 +// CHECK9-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK9-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i32** +// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP16]], align 8 +// CHECK9-NEXT: store i64 [[TMP7]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes, i32 0, i32 1), align 8 +// CHECK9-NEXT: [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK9-NEXT: store i8* null, i8** [[TMP17]], align 8 +// CHECK9-NEXT: [[TMP18:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK9-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i64* +// CHECK9-NEXT: store i64 [[TMP4]], i64* [[TMP19]], align 8 +// CHECK9-NEXT: [[TMP20:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK9-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i64* // CHECK9-NEXT: store i64 [[TMP4]], i64* [[TMP21]], align 8 -// CHECK9-NEXT: [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK9-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i64* -// CHECK9-NEXT: store i64 [[TMP4]], i64* [[TMP23]], align 8 -// CHECK9-NEXT: [[TMP24:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK9-NEXT: store i64 4, i64* [[TMP24]], align 8 -// CHECK9-NEXT: [[TMP25:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK9-NEXT: store i8* null, i8** [[TMP25]], align 8 -// CHECK9-NEXT: [[TMP26:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK9-NEXT: [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i64* -// CHECK9-NEXT: store i64 [[TMP6]], i64* [[TMP27]], align 8 -// CHECK9-NEXT: [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK9-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i64* -// CHECK9-NEXT: store i64 [[TMP6]], i64* [[TMP29]], align 8 -// CHECK9-NEXT: [[TMP30:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK9-NEXT: store i64 4, i64* [[TMP30]], align 8 -// CHECK9-NEXT: [[TMP31:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 -// CHECK9-NEXT: store i8* null, i8** [[TMP31]], align 8 -// CHECK9-NEXT: [[TMP32:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP33:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP34:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP35:%.*]] = load i32, i32* [[N]], align 4 -// CHECK9-NEXT: store i32 [[TMP35]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK9-NEXT: [[TMP36:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP36]], 0 +// CHECK9-NEXT: [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK9-NEXT: store i8* null, i8** [[TMP22]], align 8 +// CHECK9-NEXT: [[TMP23:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK9-NEXT: [[TMP24:%.*]] = bitcast i8** [[TMP23]] to i64* +// CHECK9-NEXT: store i64 [[TMP6]], i64* [[TMP24]], 
align 8 +// CHECK9-NEXT: [[TMP25:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK9-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i64* +// CHECK9-NEXT: store i64 [[TMP6]], i64* [[TMP26]], align 8 +// CHECK9-NEXT: [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 +// CHECK9-NEXT: store i8* null, i8** [[TMP27]], align 8 +// CHECK9-NEXT: [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP29:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP30:%.*]] = load i32, i32* [[N]], align 4 +// CHECK9-NEXT: store i32 [[TMP30]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK9-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP31]], 0 // CHECK9-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK9-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK9-NEXT: store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK9-NEXT: [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP37]], 1 -// CHECK9-NEXT: [[TMP38:%.*]] = zext i32 [[ADD]] to i64 -// CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB4:[0-9]+]], i64 -1, i64 [[TMP38]]) -// CHECK9-NEXT: [[TMP39:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB4]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z15teams_local_argv_l74.region_id, i32 4, i8** [[TMP32]], i8** [[TMP33]], i64* [[TMP34]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK9-NEXT: [[TMP40:%.*]] = icmp ne i32 [[TMP39]], 0 -// CHECK9-NEXT: br i1 [[TMP40]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK9-NEXT: [[TMP32:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP32]], 1 +// CHECK9-NEXT: [[TMP33:%.*]] = zext i32 [[ADD]] to i64 +// CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB4:[0-9]+]], i64 -1, i64 [[TMP33]]) +// CHECK9-NEXT: [[TMP34:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB4]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z15teams_local_argv_l74.region_id, i32 4, i8** [[TMP28]], i8** [[TMP29]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK9-NEXT: [[TMP35:%.*]] = icmp ne i32 [[TMP34]], 0 +// CHECK9-NEXT: br i1 [[TMP35]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK9: omp_offload.failed: // CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z15teams_local_argv_l74(i64 [[TMP1]], i32* [[VLA]], i64 [[TMP4]], i64 [[TMP6]]) #[[ATTR5:[0-9]+]] // CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK9: omp_offload.cont: // CHECK9-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[VLA]], i64 0 -// CHECK9-NEXT: [[TMP41:%.*]] = load i32, i32* [[ARRAYIDX]], align 4 -// CHECK9-NEXT: [[TMP42:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK9-NEXT: call void @llvm.stackrestore(i8* [[TMP42]]) -// CHECK9-NEXT: ret i32 [[TMP41]] +// CHECK9-NEXT: [[TMP36:%.*]] = load i32, i32* [[ARRAYIDX]], align 4 +// CHECK9-NEXT: [[TMP37:%.*]] = load i8*, i8** 
[[SAVED_STACK]], align 8 +// CHECK9-NEXT: call void @llvm.stackrestore(i8* [[TMP37]]) +// CHECK9-NEXT: ret i32 [[TMP36]] // // // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z15teams_local_argv_l74 @@ -3644,7 +3635,6 @@ // CHECK10-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x i8*], align 8 // CHECK10-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x i8*], align 8 // CHECK10-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x i8*], align 8 -// CHECK10-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [4 x i64], align 8 // CHECK10-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK10-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK10-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4 @@ -3670,66 +3660,58 @@ // CHECK10-NEXT: [[TMP10:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK10-NEXT: [[TMP11:%.*]] = bitcast i8** [[TMP10]] to i64* // CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP11]], align 8 -// CHECK10-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK10-NEXT: store i64 8, i64* [[TMP12]], align 8 -// CHECK10-NEXT: [[TMP13:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK10-NEXT: store i8* null, i8** [[TMP13]], align 8 -// CHECK10-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK10-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32** -// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP15]], align 8 -// CHECK10-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK10-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** -// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 8 -// CHECK10-NEXT: [[TMP18:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK10-NEXT: store i64 [[TMP7]], i64* [[TMP18]], align 8 -// CHECK10-NEXT: [[TMP19:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK10-NEXT: store i8* null, i8** [[TMP19]], align 8 -// CHECK10-NEXT: [[TMP20:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK10-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK10-NEXT: store i8* null, i8** [[TMP12]], align 8 +// CHECK10-NEXT: [[TMP13:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK10-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i32** +// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP14]], align 8 +// CHECK10-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK10-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i32** +// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP16]], align 8 +// CHECK10-NEXT: store i64 [[TMP7]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes, i32 0, i32 1), align 8 +// CHECK10-NEXT: [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK10-NEXT: store i8* null, i8** [[TMP17]], align 8 +// CHECK10-NEXT: [[TMP18:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK10-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i64* +// CHECK10-NEXT: store i64 [[TMP4]], i64* [[TMP19]], align 8 +// CHECK10-NEXT: [[TMP20:%.*]] = getelementptr inbounds [4 x i8*], [4 x 
i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK10-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i64* // CHECK10-NEXT: store i64 [[TMP4]], i64* [[TMP21]], align 8 -// CHECK10-NEXT: [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK10-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i64* -// CHECK10-NEXT: store i64 [[TMP4]], i64* [[TMP23]], align 8 -// CHECK10-NEXT: [[TMP24:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK10-NEXT: store i64 4, i64* [[TMP24]], align 8 -// CHECK10-NEXT: [[TMP25:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK10-NEXT: store i8* null, i8** [[TMP25]], align 8 -// CHECK10-NEXT: [[TMP26:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK10-NEXT: [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i64* -// CHECK10-NEXT: store i64 [[TMP6]], i64* [[TMP27]], align 8 -// CHECK10-NEXT: [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK10-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i64* -// CHECK10-NEXT: store i64 [[TMP6]], i64* [[TMP29]], align 8 -// CHECK10-NEXT: [[TMP30:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK10-NEXT: store i64 4, i64* [[TMP30]], align 8 -// CHECK10-NEXT: [[TMP31:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 -// CHECK10-NEXT: store i8* null, i8** [[TMP31]], align 8 -// CHECK10-NEXT: [[TMP32:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP33:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP34:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP35:%.*]] = load i32, i32* [[N]], align 4 -// CHECK10-NEXT: store i32 [[TMP35]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK10-NEXT: [[TMP36:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK10-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP36]], 0 +// CHECK10-NEXT: [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK10-NEXT: store i8* null, i8** [[TMP22]], align 8 +// CHECK10-NEXT: [[TMP23:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK10-NEXT: [[TMP24:%.*]] = bitcast i8** [[TMP23]] to i64* +// CHECK10-NEXT: store i64 [[TMP6]], i64* [[TMP24]], align 8 +// CHECK10-NEXT: [[TMP25:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK10-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i64* +// CHECK10-NEXT: store i64 [[TMP6]], i64* [[TMP26]], align 8 +// CHECK10-NEXT: [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 +// CHECK10-NEXT: store i8* null, i8** [[TMP27]], align 8 +// CHECK10-NEXT: [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP29:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP30:%.*]] = load i32, i32* [[N]], align 4 +// CHECK10-NEXT: store i32 [[TMP30]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK10-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK10-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP31]], 0 // CHECK10-NEXT: 
[[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK10-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK10-NEXT: store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK10-NEXT: [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK10-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP37]], 1 -// CHECK10-NEXT: [[TMP38:%.*]] = zext i32 [[ADD]] to i64 -// CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB4:[0-9]+]], i64 -1, i64 [[TMP38]]) -// CHECK10-NEXT: [[TMP39:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB4]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z15teams_local_argv_l74.region_id, i32 4, i8** [[TMP32]], i8** [[TMP33]], i64* [[TMP34]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK10-NEXT: [[TMP40:%.*]] = icmp ne i32 [[TMP39]], 0 -// CHECK10-NEXT: br i1 [[TMP40]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK10-NEXT: [[TMP32:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK10-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP32]], 1 +// CHECK10-NEXT: [[TMP33:%.*]] = zext i32 [[ADD]] to i64 +// CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB4:[0-9]+]], i64 -1, i64 [[TMP33]]) +// CHECK10-NEXT: [[TMP34:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB4]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z15teams_local_argv_l74.region_id, i32 4, i8** [[TMP28]], i8** [[TMP29]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK10-NEXT: [[TMP35:%.*]] = icmp ne i32 [[TMP34]], 0 +// CHECK10-NEXT: br i1 [[TMP35]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK10: omp_offload.failed: // CHECK10-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z15teams_local_argv_l74(i64 [[TMP1]], i32* [[VLA]], i64 [[TMP4]], i64 [[TMP6]]) #[[ATTR5:[0-9]+]] // CHECK10-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK10: omp_offload.cont: // CHECK10-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[VLA]], i64 0 -// CHECK10-NEXT: [[TMP41:%.*]] = load i32, i32* [[ARRAYIDX]], align 4 -// CHECK10-NEXT: [[TMP42:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK10-NEXT: call void @llvm.stackrestore(i8* [[TMP42]]) -// CHECK10-NEXT: ret i32 [[TMP41]] +// CHECK10-NEXT: [[TMP36:%.*]] = load i32, i32* [[ARRAYIDX]], align 4 +// CHECK10-NEXT: [[TMP37:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK10-NEXT: call void @llvm.stackrestore(i8* [[TMP37]]) +// CHECK10-NEXT: ret i32 [[TMP36]] // // // CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z15teams_local_argv_l74 @@ -4011,7 +3993,6 @@ // CHECK11-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x i8*], align 4 // CHECK11-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x i8*], align 4 // CHECK11-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x i8*], align 4 -// CHECK11-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [4 x i64], align 4 // CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -4035,66 +4016,58 @@ // CHECK11-NEXT: [[TMP10:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK11-NEXT: [[TMP11:%.*]] = bitcast i8** [[TMP10]] to 
i32* // CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP11]], align 4 -// CHECK11-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK11-NEXT: store i64 4, i64* [[TMP12]], align 4 -// CHECK11-NEXT: [[TMP13:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK11-NEXT: store i8* null, i8** [[TMP13]], align 4 -// CHECK11-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK11-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32** -// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP15]], align 4 -// CHECK11-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK11-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** -// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 4 -// CHECK11-NEXT: [[TMP18:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK11-NEXT: store i64 [[TMP7]], i64* [[TMP18]], align 4 -// CHECK11-NEXT: [[TMP19:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK11-NEXT: store i8* null, i8** [[TMP19]], align 4 -// CHECK11-NEXT: [[TMP20:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK11-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK11-NEXT: store i8* null, i8** [[TMP12]], align 4 +// CHECK11-NEXT: [[TMP13:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK11-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i32** +// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP14]], align 4 +// CHECK11-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK11-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i32** +// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP16]], align 4 +// CHECK11-NEXT: store i64 [[TMP7]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes, i32 0, i32 1), align 4 +// CHECK11-NEXT: [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK11-NEXT: store i8* null, i8** [[TMP17]], align 4 +// CHECK11-NEXT: [[TMP18:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK11-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32* +// CHECK11-NEXT: store i32 [[TMP3]], i32* [[TMP19]], align 4 +// CHECK11-NEXT: [[TMP20:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK11-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32* // CHECK11-NEXT: store i32 [[TMP3]], i32* [[TMP21]], align 4 -// CHECK11-NEXT: [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK11-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i32* -// CHECK11-NEXT: store i32 [[TMP3]], i32* [[TMP23]], align 4 -// CHECK11-NEXT: [[TMP24:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK11-NEXT: store i64 4, i64* [[TMP24]], align 4 -// CHECK11-NEXT: [[TMP25:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK11-NEXT: store i8* null, i8** [[TMP25]], align 4 -// CHECK11-NEXT: [[TMP26:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK11-NEXT: [[TMP27:%.*]] = bitcast 
i8** [[TMP26]] to i32* -// CHECK11-NEXT: store i32 [[TMP5]], i32* [[TMP27]], align 4 -// CHECK11-NEXT: [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK11-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i32* -// CHECK11-NEXT: store i32 [[TMP5]], i32* [[TMP29]], align 4 -// CHECK11-NEXT: [[TMP30:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK11-NEXT: store i64 4, i64* [[TMP30]], align 4 -// CHECK11-NEXT: [[TMP31:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 -// CHECK11-NEXT: store i8* null, i8** [[TMP31]], align 4 -// CHECK11-NEXT: [[TMP32:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP33:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP34:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP35:%.*]] = load i32, i32* [[N]], align 4 -// CHECK11-NEXT: store i32 [[TMP35]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK11-NEXT: [[TMP36:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP36]], 0 +// CHECK11-NEXT: [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK11-NEXT: store i8* null, i8** [[TMP22]], align 4 +// CHECK11-NEXT: [[TMP23:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK11-NEXT: [[TMP24:%.*]] = bitcast i8** [[TMP23]] to i32* +// CHECK11-NEXT: store i32 [[TMP5]], i32* [[TMP24]], align 4 +// CHECK11-NEXT: [[TMP25:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK11-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i32* +// CHECK11-NEXT: store i32 [[TMP5]], i32* [[TMP26]], align 4 +// CHECK11-NEXT: [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 +// CHECK11-NEXT: store i8* null, i8** [[TMP27]], align 4 +// CHECK11-NEXT: [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP29:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP30:%.*]] = load i32, i32* [[N]], align 4 +// CHECK11-NEXT: store i32 [[TMP30]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK11-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP31]], 0 // CHECK11-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK11-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK11-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK11-NEXT: [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP37]], 1 -// CHECK11-NEXT: [[TMP38:%.*]] = zext i32 [[ADD]] to i64 -// CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB4:[0-9]+]], i64 -1, i64 [[TMP38]]) -// CHECK11-NEXT: [[TMP39:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB4]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z15teams_local_argv_l74.region_id, i32 4, i8** [[TMP32]], i8** [[TMP33]], i64* [[TMP34]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK11-NEXT: [[TMP40:%.*]] = icmp ne i32 [[TMP39]], 0 -// 
CHECK11-NEXT: br i1 [[TMP40]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK11-NEXT: [[TMP32:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP32]], 1 +// CHECK11-NEXT: [[TMP33:%.*]] = zext i32 [[ADD]] to i64 +// CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB4:[0-9]+]], i64 -1, i64 [[TMP33]]) +// CHECK11-NEXT: [[TMP34:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB4]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z15teams_local_argv_l74.region_id, i32 4, i8** [[TMP28]], i8** [[TMP29]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK11-NEXT: [[TMP35:%.*]] = icmp ne i32 [[TMP34]], 0 +// CHECK11-NEXT: br i1 [[TMP35]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK11: omp_offload.failed: // CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z15teams_local_argv_l74(i32 [[TMP0]], i32* [[VLA]], i32 [[TMP3]], i32 [[TMP5]]) #[[ATTR5:[0-9]+]] // CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK11: omp_offload.cont: // CHECK11-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[VLA]], i32 0 -// CHECK11-NEXT: [[TMP41:%.*]] = load i32, i32* [[ARRAYIDX]], align 4 -// CHECK11-NEXT: [[TMP42:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK11-NEXT: call void @llvm.stackrestore(i8* [[TMP42]]) -// CHECK11-NEXT: ret i32 [[TMP41]] +// CHECK11-NEXT: [[TMP36:%.*]] = load i32, i32* [[ARRAYIDX]], align 4 +// CHECK11-NEXT: [[TMP37:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK11-NEXT: call void @llvm.stackrestore(i8* [[TMP37]]) +// CHECK11-NEXT: ret i32 [[TMP36]] // // // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z15teams_local_argv_l74 @@ -4369,7 +4342,6 @@ // CHECK12-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x i8*], align 4 // CHECK12-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x i8*], align 4 // CHECK12-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x i8*], align 4 -// CHECK12-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [4 x i64], align 4 // CHECK12-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK12-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK12-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -4393,66 +4365,58 @@ // CHECK12-NEXT: [[TMP10:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK12-NEXT: [[TMP11:%.*]] = bitcast i8** [[TMP10]] to i32* // CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP11]], align 4 -// CHECK12-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK12-NEXT: store i64 4, i64* [[TMP12]], align 4 -// CHECK12-NEXT: [[TMP13:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK12-NEXT: store i8* null, i8** [[TMP13]], align 4 -// CHECK12-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK12-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32** -// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP15]], align 4 -// CHECK12-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK12-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** -// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 4 -// 
CHECK12-NEXT: [[TMP18:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK12-NEXT: store i64 [[TMP7]], i64* [[TMP18]], align 4 -// CHECK12-NEXT: [[TMP19:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK12-NEXT: store i8* null, i8** [[TMP19]], align 4 -// CHECK12-NEXT: [[TMP20:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK12-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK12-NEXT: store i8* null, i8** [[TMP12]], align 4 +// CHECK12-NEXT: [[TMP13:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK12-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i32** +// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP14]], align 4 +// CHECK12-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK12-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i32** +// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP16]], align 4 +// CHECK12-NEXT: store i64 [[TMP7]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes, i32 0, i32 1), align 4 +// CHECK12-NEXT: [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK12-NEXT: store i8* null, i8** [[TMP17]], align 4 +// CHECK12-NEXT: [[TMP18:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK12-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32* +// CHECK12-NEXT: store i32 [[TMP3]], i32* [[TMP19]], align 4 +// CHECK12-NEXT: [[TMP20:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK12-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32* // CHECK12-NEXT: store i32 [[TMP3]], i32* [[TMP21]], align 4 -// CHECK12-NEXT: [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK12-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i32* -// CHECK12-NEXT: store i32 [[TMP3]], i32* [[TMP23]], align 4 -// CHECK12-NEXT: [[TMP24:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK12-NEXT: store i64 4, i64* [[TMP24]], align 4 -// CHECK12-NEXT: [[TMP25:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK12-NEXT: store i8* null, i8** [[TMP25]], align 4 -// CHECK12-NEXT: [[TMP26:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK12-NEXT: [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i32* -// CHECK12-NEXT: store i32 [[TMP5]], i32* [[TMP27]], align 4 -// CHECK12-NEXT: [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK12-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i32* -// CHECK12-NEXT: store i32 [[TMP5]], i32* [[TMP29]], align 4 -// CHECK12-NEXT: [[TMP30:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK12-NEXT: store i64 4, i64* [[TMP30]], align 4 -// CHECK12-NEXT: [[TMP31:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 -// CHECK12-NEXT: store i8* null, i8** [[TMP31]], align 4 -// CHECK12-NEXT: [[TMP32:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP33:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// 
CHECK12-NEXT: [[TMP34:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP35:%.*]] = load i32, i32* [[N]], align 4 -// CHECK12-NEXT: store i32 [[TMP35]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK12-NEXT: [[TMP36:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK12-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP36]], 0 +// CHECK12-NEXT: [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK12-NEXT: store i8* null, i8** [[TMP22]], align 4 +// CHECK12-NEXT: [[TMP23:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK12-NEXT: [[TMP24:%.*]] = bitcast i8** [[TMP23]] to i32* +// CHECK12-NEXT: store i32 [[TMP5]], i32* [[TMP24]], align 4 +// CHECK12-NEXT: [[TMP25:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK12-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i32* +// CHECK12-NEXT: store i32 [[TMP5]], i32* [[TMP26]], align 4 +// CHECK12-NEXT: [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 +// CHECK12-NEXT: store i8* null, i8** [[TMP27]], align 4 +// CHECK12-NEXT: [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP29:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP30:%.*]] = load i32, i32* [[N]], align 4 +// CHECK12-NEXT: store i32 [[TMP30]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK12-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK12-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP31]], 0 // CHECK12-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK12-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK12-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK12-NEXT: [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK12-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP37]], 1 -// CHECK12-NEXT: [[TMP38:%.*]] = zext i32 [[ADD]] to i64 -// CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB4:[0-9]+]], i64 -1, i64 [[TMP38]]) -// CHECK12-NEXT: [[TMP39:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB4]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z15teams_local_argv_l74.region_id, i32 4, i8** [[TMP32]], i8** [[TMP33]], i64* [[TMP34]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK12-NEXT: [[TMP40:%.*]] = icmp ne i32 [[TMP39]], 0 -// CHECK12-NEXT: br i1 [[TMP40]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK12-NEXT: [[TMP32:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK12-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP32]], 1 +// CHECK12-NEXT: [[TMP33:%.*]] = zext i32 [[ADD]] to i64 +// CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB4:[0-9]+]], i64 -1, i64 [[TMP33]]) +// CHECK12-NEXT: [[TMP34:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB4]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z15teams_local_argv_l74.region_id, i32 4, i8** [[TMP28]], i8** [[TMP29]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK12-NEXT: [[TMP35:%.*]] = icmp ne i32 [[TMP34]], 
0 +// CHECK12-NEXT: br i1 [[TMP35]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK12: omp_offload.failed: // CHECK12-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z15teams_local_argv_l74(i32 [[TMP0]], i32* [[VLA]], i32 [[TMP3]], i32 [[TMP5]]) #[[ATTR5:[0-9]+]] // CHECK12-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK12: omp_offload.cont: // CHECK12-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[VLA]], i32 0 -// CHECK12-NEXT: [[TMP41:%.*]] = load i32, i32* [[ARRAYIDX]], align 4 -// CHECK12-NEXT: [[TMP42:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK12-NEXT: call void @llvm.stackrestore(i8* [[TMP42]]) -// CHECK12-NEXT: ret i32 [[TMP41]] +// CHECK12-NEXT: [[TMP36:%.*]] = load i32, i32* [[ARRAYIDX]], align 4 +// CHECK12-NEXT: [[TMP37:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK12-NEXT: call void @llvm.stackrestore(i8* [[TMP37]]) +// CHECK12-NEXT: ret i32 [[TMP36]] // // // CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z15teams_local_argv_l74 @@ -6314,7 +6278,6 @@ // CHECK25-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x i8*], align 8 // CHECK25-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x i8*], align 8 // CHECK25-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x i8*], align 8 -// CHECK25-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [4 x i64], align 8 // CHECK25-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK25-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK25-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4 @@ -6343,57 +6306,49 @@ // CHECK25-NEXT: [[TMP10:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK25-NEXT: [[TMP11:%.*]] = bitcast i8** [[TMP10]] to i64* // CHECK25-NEXT: store i64 [[TMP1]], i64* [[TMP11]], align 8 -// CHECK25-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK25-NEXT: store i64 8, i64* [[TMP12]], align 8 -// CHECK25-NEXT: [[TMP13:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK25-NEXT: store i8* null, i8** [[TMP13]], align 8 -// CHECK25-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK25-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32** -// CHECK25-NEXT: store i32* [[VLA]], i32** [[TMP15]], align 8 -// CHECK25-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK25-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** -// CHECK25-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 8 -// CHECK25-NEXT: [[TMP18:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK25-NEXT: store i64 [[TMP7]], i64* [[TMP18]], align 8 -// CHECK25-NEXT: [[TMP19:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK25-NEXT: store i8* null, i8** [[TMP19]], align 8 -// CHECK25-NEXT: [[TMP20:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK25-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK25-NEXT: store i8* null, i8** [[TMP12]], align 8 +// CHECK25-NEXT: [[TMP13:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK25-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i32** +// CHECK25-NEXT: store i32* [[VLA]], i32** [[TMP14]], align 8 +// CHECK25-NEXT: [[TMP15:%.*]] 
= getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK25-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i32** +// CHECK25-NEXT: store i32* [[VLA]], i32** [[TMP16]], align 8 +// CHECK25-NEXT: store i64 [[TMP7]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes, i32 0, i32 1), align 8 +// CHECK25-NEXT: [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK25-NEXT: store i8* null, i8** [[TMP17]], align 8 +// CHECK25-NEXT: [[TMP18:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK25-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i64* +// CHECK25-NEXT: store i64 [[TMP4]], i64* [[TMP19]], align 8 +// CHECK25-NEXT: [[TMP20:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK25-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i64* // CHECK25-NEXT: store i64 [[TMP4]], i64* [[TMP21]], align 8 -// CHECK25-NEXT: [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK25-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i64* -// CHECK25-NEXT: store i64 [[TMP4]], i64* [[TMP23]], align 8 -// CHECK25-NEXT: [[TMP24:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK25-NEXT: store i64 4, i64* [[TMP24]], align 8 -// CHECK25-NEXT: [[TMP25:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK25-NEXT: store i8* null, i8** [[TMP25]], align 8 -// CHECK25-NEXT: [[TMP26:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK25-NEXT: [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i64* -// CHECK25-NEXT: store i64 [[TMP6]], i64* [[TMP27]], align 8 -// CHECK25-NEXT: [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK25-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i64* -// CHECK25-NEXT: store i64 [[TMP6]], i64* [[TMP29]], align 8 -// CHECK25-NEXT: [[TMP30:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK25-NEXT: store i64 4, i64* [[TMP30]], align 8 -// CHECK25-NEXT: [[TMP31:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 -// CHECK25-NEXT: store i8* null, i8** [[TMP31]], align 8 -// CHECK25-NEXT: [[TMP32:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK25-NEXT: [[TMP33:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK25-NEXT: [[TMP34:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK25-NEXT: [[TMP35:%.*]] = load i32, i32* [[N]], align 4 -// CHECK25-NEXT: store i32 [[TMP35]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK25-NEXT: [[TMP36:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK25-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP36]], 0 +// CHECK25-NEXT: [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK25-NEXT: store i8* null, i8** [[TMP22]], align 8 +// CHECK25-NEXT: [[TMP23:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK25-NEXT: [[TMP24:%.*]] = bitcast i8** [[TMP23]] to i64* +// CHECK25-NEXT: store i64 [[TMP6]], i64* [[TMP24]], align 8 +// CHECK25-NEXT: [[TMP25:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 
+// CHECK25-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i64* +// CHECK25-NEXT: store i64 [[TMP6]], i64* [[TMP26]], align 8 +// CHECK25-NEXT: [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 +// CHECK25-NEXT: store i8* null, i8** [[TMP27]], align 8 +// CHECK25-NEXT: [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK25-NEXT: [[TMP29:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK25-NEXT: [[TMP30:%.*]] = load i32, i32* [[N]], align 4 +// CHECK25-NEXT: store i32 [[TMP30]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK25-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK25-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP31]], 0 // CHECK25-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK25-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK25-NEXT: store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK25-NEXT: [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK25-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP37]], 1 -// CHECK25-NEXT: [[TMP38:%.*]] = zext i32 [[ADD]] to i64 -// CHECK25-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB4:[0-9]+]], i64 -1, i64 [[TMP38]]) -// CHECK25-NEXT: [[TMP39:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB4]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l166.region_id, i32 4, i8** [[TMP32]], i8** [[TMP33]], i64* [[TMP34]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK25-NEXT: [[TMP40:%.*]] = icmp ne i32 [[TMP39]], 0 -// CHECK25-NEXT: br i1 [[TMP40]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK25-NEXT: [[TMP32:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK25-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP32]], 1 +// CHECK25-NEXT: [[TMP33:%.*]] = zext i32 [[ADD]] to i64 +// CHECK25-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB4:[0-9]+]], i64 -1, i64 [[TMP33]]) +// CHECK25-NEXT: [[TMP34:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB4]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l166.region_id, i32 4, i8** [[TMP28]], i8** [[TMP29]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK25-NEXT: [[TMP35:%.*]] = icmp ne i32 [[TMP34]], 0 +// CHECK25-NEXT: br i1 [[TMP35]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK25: omp_offload.failed: // CHECK25-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l166(i64 [[TMP1]], i32* [[VLA]], i64 [[TMP4]], i64 [[TMP6]]) #[[ATTR5:[0-9]+]] // CHECK25-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -6401,10 +6356,10 @@ // CHECK25-NEXT: [[TMP41:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 // CHECK25-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP41]]) // CHECK25-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK25-NEXT: [[TMP42:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK25-NEXT: call void @llvm.stackrestore(i8* [[TMP42]]) -// CHECK25-NEXT: [[TMP43:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK25-NEXT: ret i32 [[TMP43]] +// CHECK25-NEXT: [[TMP37:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK25-NEXT: call void 
@llvm.stackrestore(i8* [[TMP37]]) +// CHECK25-NEXT: [[TMP38:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK25-NEXT: ret i32 [[TMP38]] // // // CHECK25-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l166 @@ -6719,7 +6674,7 @@ // CHECK25-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK25-NEXT: [[TMP21:%.*]] = load i32, i32* [[TE]], align 4 // CHECK25-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB4]], i64 -1, i64 10) -// CHECK25-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB4]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l155.region_id, i32 3, i8** [[TMP19]], i8** [[TMP20]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 [[TMP21]], i32 0) +// CHECK25-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB4]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l155.region_id, i32 3, i8** [[TMP19]], i8** [[TMP20]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 [[TMP21]], i32 0) // CHECK25-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0 // CHECK25-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK25: omp_offload.failed: @@ -6925,7 +6880,6 @@ // CHECK26-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x i8*], align 8 // CHECK26-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x i8*], align 8 // CHECK26-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x i8*], align 8 -// CHECK26-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [4 x i64], align 8 // CHECK26-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK26-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK26-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4 @@ -6954,57 +6908,49 @@ // CHECK26-NEXT: [[TMP10:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK26-NEXT: [[TMP11:%.*]] = bitcast i8** [[TMP10]] to i64* // CHECK26-NEXT: store i64 [[TMP1]], i64* [[TMP11]], align 8 -// CHECK26-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK26-NEXT: store i64 8, i64* [[TMP12]], align 8 -// CHECK26-NEXT: [[TMP13:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK26-NEXT: store i8* null, i8** [[TMP13]], align 8 -// CHECK26-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK26-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32** -// CHECK26-NEXT: store i32* [[VLA]], i32** [[TMP15]], align 8 -// CHECK26-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK26-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** -// CHECK26-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 8 -// CHECK26-NEXT: [[TMP18:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK26-NEXT: store i64 [[TMP7]], i64* [[TMP18]], align 8 -// CHECK26-NEXT: [[TMP19:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK26-NEXT: store i8* null, i8** [[TMP19]], align 8 -// 
CHECK26-NEXT: [[TMP20:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK26-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK26-NEXT: store i8* null, i8** [[TMP12]], align 8 +// CHECK26-NEXT: [[TMP13:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK26-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i32** +// CHECK26-NEXT: store i32* [[VLA]], i32** [[TMP14]], align 8 +// CHECK26-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK26-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i32** +// CHECK26-NEXT: store i32* [[VLA]], i32** [[TMP16]], align 8 +// CHECK26-NEXT: store i64 [[TMP7]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes, i32 0, i32 1), align 8 +// CHECK26-NEXT: [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK26-NEXT: store i8* null, i8** [[TMP17]], align 8 +// CHECK26-NEXT: [[TMP18:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK26-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i64* +// CHECK26-NEXT: store i64 [[TMP4]], i64* [[TMP19]], align 8 +// CHECK26-NEXT: [[TMP20:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK26-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i64* // CHECK26-NEXT: store i64 [[TMP4]], i64* [[TMP21]], align 8 -// CHECK26-NEXT: [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK26-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i64* -// CHECK26-NEXT: store i64 [[TMP4]], i64* [[TMP23]], align 8 -// CHECK26-NEXT: [[TMP24:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK26-NEXT: store i64 4, i64* [[TMP24]], align 8 -// CHECK26-NEXT: [[TMP25:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK26-NEXT: store i8* null, i8** [[TMP25]], align 8 -// CHECK26-NEXT: [[TMP26:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK26-NEXT: [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i64* -// CHECK26-NEXT: store i64 [[TMP6]], i64* [[TMP27]], align 8 -// CHECK26-NEXT: [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK26-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i64* -// CHECK26-NEXT: store i64 [[TMP6]], i64* [[TMP29]], align 8 -// CHECK26-NEXT: [[TMP30:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK26-NEXT: store i64 4, i64* [[TMP30]], align 8 -// CHECK26-NEXT: [[TMP31:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 -// CHECK26-NEXT: store i8* null, i8** [[TMP31]], align 8 -// CHECK26-NEXT: [[TMP32:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK26-NEXT: [[TMP33:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK26-NEXT: [[TMP34:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK26-NEXT: [[TMP35:%.*]] = load i32, i32* [[N]], align 4 -// CHECK26-NEXT: store i32 [[TMP35]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK26-NEXT: [[TMP36:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK26-NEXT: 
[[SUB:%.*]] = sub nsw i32 [[TMP36]], 0 +// CHECK26-NEXT: [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK26-NEXT: store i8* null, i8** [[TMP22]], align 8 +// CHECK26-NEXT: [[TMP23:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK26-NEXT: [[TMP24:%.*]] = bitcast i8** [[TMP23]] to i64* +// CHECK26-NEXT: store i64 [[TMP6]], i64* [[TMP24]], align 8 +// CHECK26-NEXT: [[TMP25:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK26-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i64* +// CHECK26-NEXT: store i64 [[TMP6]], i64* [[TMP26]], align 8 +// CHECK26-NEXT: [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 +// CHECK26-NEXT: store i8* null, i8** [[TMP27]], align 8 +// CHECK26-NEXT: [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK26-NEXT: [[TMP29:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK26-NEXT: [[TMP30:%.*]] = load i32, i32* [[N]], align 4 +// CHECK26-NEXT: store i32 [[TMP30]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK26-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK26-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP31]], 0 // CHECK26-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK26-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK26-NEXT: store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK26-NEXT: [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK26-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP37]], 1 -// CHECK26-NEXT: [[TMP38:%.*]] = zext i32 [[ADD]] to i64 -// CHECK26-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB4:[0-9]+]], i64 -1, i64 [[TMP38]]) -// CHECK26-NEXT: [[TMP39:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB4]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l166.region_id, i32 4, i8** [[TMP32]], i8** [[TMP33]], i64* [[TMP34]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK26-NEXT: [[TMP40:%.*]] = icmp ne i32 [[TMP39]], 0 -// CHECK26-NEXT: br i1 [[TMP40]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK26-NEXT: [[TMP32:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK26-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP32]], 1 +// CHECK26-NEXT: [[TMP33:%.*]] = zext i32 [[ADD]] to i64 +// CHECK26-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB4:[0-9]+]], i64 -1, i64 [[TMP33]]) +// CHECK26-NEXT: [[TMP34:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB4]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l166.region_id, i32 4, i8** [[TMP28]], i8** [[TMP29]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK26-NEXT: [[TMP35:%.*]] = icmp ne i32 [[TMP34]], 0 +// CHECK26-NEXT: br i1 [[TMP35]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK26: omp_offload.failed: // CHECK26-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l166(i64 [[TMP1]], i32* [[VLA]], i64 [[TMP4]], i64 [[TMP6]]) #[[ATTR5:[0-9]+]] // CHECK26-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -7012,10 +6958,10 @@ // CHECK26-NEXT: 
[[TMP41:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 // CHECK26-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP41]]) // CHECK26-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK26-NEXT: [[TMP42:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK26-NEXT: call void @llvm.stackrestore(i8* [[TMP42]]) -// CHECK26-NEXT: [[TMP43:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK26-NEXT: ret i32 [[TMP43]] +// CHECK26-NEXT: [[TMP37:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK26-NEXT: call void @llvm.stackrestore(i8* [[TMP37]]) +// CHECK26-NEXT: [[TMP38:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK26-NEXT: ret i32 [[TMP38]] // // // CHECK26-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l166 @@ -7330,7 +7276,7 @@ // CHECK26-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK26-NEXT: [[TMP21:%.*]] = load i32, i32* [[TE]], align 4 // CHECK26-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB4]], i64 -1, i64 10) -// CHECK26-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB4]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l155.region_id, i32 3, i8** [[TMP19]], i8** [[TMP20]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 [[TMP21]], i32 0) +// CHECK26-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB4]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l155.region_id, i32 3, i8** [[TMP19]], i8** [[TMP20]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 [[TMP21]], i32 0) // CHECK26-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0 // CHECK26-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK26: omp_offload.failed: @@ -7536,7 +7482,6 @@ // CHECK27-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x i8*], align 4 // CHECK27-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x i8*], align 4 // CHECK27-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x i8*], align 4 -// CHECK27-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [4 x i64], align 4 // CHECK27-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK27-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK27-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -7563,57 +7508,49 @@ // CHECK27-NEXT: [[TMP10:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK27-NEXT: [[TMP11:%.*]] = bitcast i8** [[TMP10]] to i32* // CHECK27-NEXT: store i32 [[TMP0]], i32* [[TMP11]], align 4 -// CHECK27-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK27-NEXT: store i64 4, i64* [[TMP12]], align 4 -// CHECK27-NEXT: [[TMP13:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK27-NEXT: store i8* null, i8** [[TMP13]], align 4 -// CHECK27-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK27-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32** -// CHECK27-NEXT: store i32* [[VLA]], i32** [[TMP15]], align 4 -// CHECK27-NEXT: [[TMP16:%.*]] 
= getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK27-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** -// CHECK27-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 4 -// CHECK27-NEXT: [[TMP18:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK27-NEXT: store i64 [[TMP7]], i64* [[TMP18]], align 4 -// CHECK27-NEXT: [[TMP19:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK27-NEXT: store i8* null, i8** [[TMP19]], align 4 -// CHECK27-NEXT: [[TMP20:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK27-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK27-NEXT: store i8* null, i8** [[TMP12]], align 4 +// CHECK27-NEXT: [[TMP13:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK27-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i32** +// CHECK27-NEXT: store i32* [[VLA]], i32** [[TMP14]], align 4 +// CHECK27-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK27-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i32** +// CHECK27-NEXT: store i32* [[VLA]], i32** [[TMP16]], align 4 +// CHECK27-NEXT: store i64 [[TMP7]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes, i32 0, i32 1), align 4 +// CHECK27-NEXT: [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK27-NEXT: store i8* null, i8** [[TMP17]], align 4 +// CHECK27-NEXT: [[TMP18:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK27-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32* +// CHECK27-NEXT: store i32 [[TMP3]], i32* [[TMP19]], align 4 +// CHECK27-NEXT: [[TMP20:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK27-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32* // CHECK27-NEXT: store i32 [[TMP3]], i32* [[TMP21]], align 4 -// CHECK27-NEXT: [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK27-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i32* -// CHECK27-NEXT: store i32 [[TMP3]], i32* [[TMP23]], align 4 -// CHECK27-NEXT: [[TMP24:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK27-NEXT: store i64 4, i64* [[TMP24]], align 4 -// CHECK27-NEXT: [[TMP25:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK27-NEXT: store i8* null, i8** [[TMP25]], align 4 -// CHECK27-NEXT: [[TMP26:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK27-NEXT: [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i32* -// CHECK27-NEXT: store i32 [[TMP5]], i32* [[TMP27]], align 4 -// CHECK27-NEXT: [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK27-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i32* -// CHECK27-NEXT: store i32 [[TMP5]], i32* [[TMP29]], align 4 -// CHECK27-NEXT: [[TMP30:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK27-NEXT: store i64 4, i64* [[TMP30]], align 4 -// CHECK27-NEXT: [[TMP31:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 -// CHECK27-NEXT: store i8* null, i8** [[TMP31]], align 4 -// CHECK27-NEXT: 
[[TMP32:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK27-NEXT: [[TMP33:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK27-NEXT: [[TMP34:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK27-NEXT: [[TMP35:%.*]] = load i32, i32* [[N]], align 4 -// CHECK27-NEXT: store i32 [[TMP35]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK27-NEXT: [[TMP36:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK27-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP36]], 0 +// CHECK27-NEXT: [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK27-NEXT: store i8* null, i8** [[TMP22]], align 4 +// CHECK27-NEXT: [[TMP23:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK27-NEXT: [[TMP24:%.*]] = bitcast i8** [[TMP23]] to i32* +// CHECK27-NEXT: store i32 [[TMP5]], i32* [[TMP24]], align 4 +// CHECK27-NEXT: [[TMP25:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK27-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i32* +// CHECK27-NEXT: store i32 [[TMP5]], i32* [[TMP26]], align 4 +// CHECK27-NEXT: [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 +// CHECK27-NEXT: store i8* null, i8** [[TMP27]], align 4 +// CHECK27-NEXT: [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK27-NEXT: [[TMP29:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK27-NEXT: [[TMP30:%.*]] = load i32, i32* [[N]], align 4 +// CHECK27-NEXT: store i32 [[TMP30]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK27-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK27-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP31]], 0 // CHECK27-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK27-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK27-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK27-NEXT: [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK27-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP37]], 1 -// CHECK27-NEXT: [[TMP38:%.*]] = zext i32 [[ADD]] to i64 -// CHECK27-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB4:[0-9]+]], i64 -1, i64 [[TMP38]]) -// CHECK27-NEXT: [[TMP39:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB4]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l166.region_id, i32 4, i8** [[TMP32]], i8** [[TMP33]], i64* [[TMP34]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK27-NEXT: [[TMP40:%.*]] = icmp ne i32 [[TMP39]], 0 -// CHECK27-NEXT: br i1 [[TMP40]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK27-NEXT: [[TMP32:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK27-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP32]], 1 +// CHECK27-NEXT: [[TMP33:%.*]] = zext i32 [[ADD]] to i64 +// CHECK27-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB4:[0-9]+]], i64 -1, i64 [[TMP33]]) +// CHECK27-NEXT: [[TMP34:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB4]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l166.region_id, i32 4, i8** [[TMP28]], i8** [[TMP29]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes, i32 0, 
i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK27-NEXT: [[TMP35:%.*]] = icmp ne i32 [[TMP34]], 0 +// CHECK27-NEXT: br i1 [[TMP35]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK27: omp_offload.failed: // CHECK27-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l166(i32 [[TMP0]], i32* [[VLA]], i32 [[TMP3]], i32 [[TMP5]]) #[[ATTR5:[0-9]+]] // CHECK27-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -7621,10 +7558,10 @@ // CHECK27-NEXT: [[TMP41:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 // CHECK27-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP41]]) // CHECK27-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK27-NEXT: [[TMP42:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK27-NEXT: call void @llvm.stackrestore(i8* [[TMP42]]) -// CHECK27-NEXT: [[TMP43:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK27-NEXT: ret i32 [[TMP43]] +// CHECK27-NEXT: [[TMP37:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK27-NEXT: call void @llvm.stackrestore(i8* [[TMP37]]) +// CHECK27-NEXT: [[TMP38:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK27-NEXT: ret i32 [[TMP38]] // // // CHECK27-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l166 @@ -7930,7 +7867,7 @@ // CHECK27-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK27-NEXT: [[TMP21:%.*]] = load i32, i32* [[TE]], align 4 // CHECK27-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB4]], i64 -1, i64 10) -// CHECK27-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB4]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l155.region_id, i32 3, i8** [[TMP19]], i8** [[TMP20]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 [[TMP21]], i32 0) +// CHECK27-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB4]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l155.region_id, i32 3, i8** [[TMP19]], i8** [[TMP20]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 [[TMP21]], i32 0) // CHECK27-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0 // CHECK27-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK27: omp_offload.failed: @@ -8129,7 +8066,6 @@ // CHECK28-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x i8*], align 4 // CHECK28-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x i8*], align 4 // CHECK28-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x i8*], align 4 -// CHECK28-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [4 x i64], align 4 // CHECK28-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK28-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK28-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -8156,57 +8092,49 @@ // CHECK28-NEXT: [[TMP10:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK28-NEXT: [[TMP11:%.*]] = bitcast i8** [[TMP10]] to i32* // CHECK28-NEXT: store i32 [[TMP0]], i32* [[TMP11]], align 4 -// CHECK28-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i64], [4 x 
i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK28-NEXT: store i64 4, i64* [[TMP12]], align 4 -// CHECK28-NEXT: [[TMP13:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK28-NEXT: store i8* null, i8** [[TMP13]], align 4 -// CHECK28-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK28-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32** -// CHECK28-NEXT: store i32* [[VLA]], i32** [[TMP15]], align 4 -// CHECK28-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK28-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** -// CHECK28-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 4 -// CHECK28-NEXT: [[TMP18:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK28-NEXT: store i64 [[TMP7]], i64* [[TMP18]], align 4 -// CHECK28-NEXT: [[TMP19:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK28-NEXT: store i8* null, i8** [[TMP19]], align 4 -// CHECK28-NEXT: [[TMP20:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK28-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK28-NEXT: store i8* null, i8** [[TMP12]], align 4 +// CHECK28-NEXT: [[TMP13:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK28-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i32** +// CHECK28-NEXT: store i32* [[VLA]], i32** [[TMP14]], align 4 +// CHECK28-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK28-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i32** +// CHECK28-NEXT: store i32* [[VLA]], i32** [[TMP16]], align 4 +// CHECK28-NEXT: store i64 [[TMP7]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes, i32 0, i32 1), align 4 +// CHECK28-NEXT: [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK28-NEXT: store i8* null, i8** [[TMP17]], align 4 +// CHECK28-NEXT: [[TMP18:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK28-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32* +// CHECK28-NEXT: store i32 [[TMP3]], i32* [[TMP19]], align 4 +// CHECK28-NEXT: [[TMP20:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK28-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32* // CHECK28-NEXT: store i32 [[TMP3]], i32* [[TMP21]], align 4 -// CHECK28-NEXT: [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK28-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i32* -// CHECK28-NEXT: store i32 [[TMP3]], i32* [[TMP23]], align 4 -// CHECK28-NEXT: [[TMP24:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK28-NEXT: store i64 4, i64* [[TMP24]], align 4 -// CHECK28-NEXT: [[TMP25:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK28-NEXT: store i8* null, i8** [[TMP25]], align 4 -// CHECK28-NEXT: [[TMP26:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK28-NEXT: [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i32* -// CHECK28-NEXT: store i32 [[TMP5]], i32* [[TMP27]], align 4 -// CHECK28-NEXT: [[TMP28:%.*]] = getelementptr 
inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK28-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i32* -// CHECK28-NEXT: store i32 [[TMP5]], i32* [[TMP29]], align 4 -// CHECK28-NEXT: [[TMP30:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK28-NEXT: store i64 4, i64* [[TMP30]], align 4 -// CHECK28-NEXT: [[TMP31:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 -// CHECK28-NEXT: store i8* null, i8** [[TMP31]], align 4 -// CHECK28-NEXT: [[TMP32:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK28-NEXT: [[TMP33:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK28-NEXT: [[TMP34:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK28-NEXT: [[TMP35:%.*]] = load i32, i32* [[N]], align 4 -// CHECK28-NEXT: store i32 [[TMP35]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK28-NEXT: [[TMP36:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK28-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP36]], 0 +// CHECK28-NEXT: [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK28-NEXT: store i8* null, i8** [[TMP22]], align 4 +// CHECK28-NEXT: [[TMP23:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK28-NEXT: [[TMP24:%.*]] = bitcast i8** [[TMP23]] to i32* +// CHECK28-NEXT: store i32 [[TMP5]], i32* [[TMP24]], align 4 +// CHECK28-NEXT: [[TMP25:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK28-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i32* +// CHECK28-NEXT: store i32 [[TMP5]], i32* [[TMP26]], align 4 +// CHECK28-NEXT: [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 +// CHECK28-NEXT: store i8* null, i8** [[TMP27]], align 4 +// CHECK28-NEXT: [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK28-NEXT: [[TMP29:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK28-NEXT: [[TMP30:%.*]] = load i32, i32* [[N]], align 4 +// CHECK28-NEXT: store i32 [[TMP30]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK28-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK28-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP31]], 0 // CHECK28-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK28-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK28-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK28-NEXT: [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK28-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP37]], 1 -// CHECK28-NEXT: [[TMP38:%.*]] = zext i32 [[ADD]] to i64 -// CHECK28-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB4:[0-9]+]], i64 -1, i64 [[TMP38]]) -// CHECK28-NEXT: [[TMP39:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB4]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l166.region_id, i32 4, i8** [[TMP32]], i8** [[TMP33]], i64* [[TMP34]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK28-NEXT: [[TMP40:%.*]] = icmp ne i32 [[TMP39]], 0 -// CHECK28-NEXT: br i1 [[TMP40]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK28-NEXT: [[TMP32:%.*]] = load i32, i32* 
[[DOTCAPTURE_EXPR_1]], align 4 +// CHECK28-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP32]], 1 +// CHECK28-NEXT: [[TMP33:%.*]] = zext i32 [[ADD]] to i64 +// CHECK28-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB4:[0-9]+]], i64 -1, i64 [[TMP33]]) +// CHECK28-NEXT: [[TMP34:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB4]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l166.region_id, i32 4, i8** [[TMP28]], i8** [[TMP29]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK28-NEXT: [[TMP35:%.*]] = icmp ne i32 [[TMP34]], 0 +// CHECK28-NEXT: br i1 [[TMP35]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK28: omp_offload.failed: // CHECK28-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l166(i32 [[TMP0]], i32* [[VLA]], i32 [[TMP3]], i32 [[TMP5]]) #[[ATTR5:[0-9]+]] // CHECK28-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -8214,10 +8142,10 @@ // CHECK28-NEXT: [[TMP41:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 // CHECK28-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP41]]) // CHECK28-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK28-NEXT: [[TMP42:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK28-NEXT: call void @llvm.stackrestore(i8* [[TMP42]]) -// CHECK28-NEXT: [[TMP43:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK28-NEXT: ret i32 [[TMP43]] +// CHECK28-NEXT: [[TMP37:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK28-NEXT: call void @llvm.stackrestore(i8* [[TMP37]]) +// CHECK28-NEXT: [[TMP38:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK28-NEXT: ret i32 [[TMP38]] // // // CHECK28-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l166 @@ -8523,7 +8451,7 @@ // CHECK28-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK28-NEXT: [[TMP21:%.*]] = load i32, i32* [[TE]], align 4 // CHECK28-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB4]], i64 -1, i64 10) -// CHECK28-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB4]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l155.region_id, i32 3, i8** [[TMP19]], i8** [[TMP20]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 [[TMP21]], i32 0) +// CHECK28-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB4]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l155.region_id, i32 3, i8** [[TMP19]], i8** [[TMP20]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 [[TMP21]], i32 0) // CHECK28-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0 // CHECK28-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK28: omp_offload.failed: diff --git a/clang/test/OpenMP/teams_distribute_parallel_for_simd_collapse_codegen.cpp b/clang/test/OpenMP/teams_distribute_parallel_for_simd_collapse_codegen.cpp --- a/clang/test/OpenMP/teams_distribute_parallel_for_simd_collapse_codegen.cpp +++ 
b/clang/test/OpenMP/teams_distribute_parallel_for_simd_collapse_codegen.cpp @@ -29,7 +29,7 @@ #pragma omp teams distribute parallel for simd collapse(2) for(int i = 0; i < X; i++) { for(int j = 0; j < Y; j++) { - a[i][j] = (T)0; + a[i][j] = (T)0; } } @@ -1331,7 +1331,6 @@ // CHECK9-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 8 // CHECK9-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 8 // CHECK9-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 8 -// CHECK9-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 8 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK9-NEXT: [[_TMP2:%.*]] = alloca i32, align 4 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 @@ -1368,74 +1367,64 @@ // CHECK9-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK9-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64* // CHECK9-NEXT: store i64 [[TMP7]], i64* [[TMP15]], align 8 -// CHECK9-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK9-NEXT: store i64 4, i64* [[TMP16]], align 8 -// CHECK9-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK9-NEXT: store i8* null, i8** [[TMP17]], align 8 -// CHECK9-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK9-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i64* -// CHECK9-NEXT: store i64 [[TMP9]], i64* [[TMP19]], align 8 -// CHECK9-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK9-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i64* -// CHECK9-NEXT: store i64 [[TMP9]], i64* [[TMP21]], align 8 -// CHECK9-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK9-NEXT: store i64 4, i64* [[TMP22]], align 8 -// CHECK9-NEXT: [[TMP23:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK9-NEXT: store i8* null, i8** [[TMP23]], align 8 -// CHECK9-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK9-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK9-NEXT: store i8* null, i8** [[TMP16]], align 8 +// CHECK9-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK9-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i64* +// CHECK9-NEXT: store i64 [[TMP9]], i64* [[TMP18]], align 8 +// CHECK9-NEXT: [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK9-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i64* +// CHECK9-NEXT: store i64 [[TMP9]], i64* [[TMP20]], align 8 +// CHECK9-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK9-NEXT: store i8* null, i8** [[TMP21]], align 8 +// CHECK9-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK9-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i64* +// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP23]], align 8 +// CHECK9-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK9-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i64* // CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP25]], align 8 -// 
CHECK9-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK9-NEXT: [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i64* -// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP27]], align 8 -// CHECK9-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK9-NEXT: store i64 8, i64* [[TMP28]], align 8 -// CHECK9-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK9-NEXT: store i8* null, i8** [[TMP29]], align 8 -// CHECK9-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK9-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i64* -// CHECK9-NEXT: store i64 [[TMP3]], i64* [[TMP31]], align 8 -// CHECK9-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK9-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i64* -// CHECK9-NEXT: store i64 [[TMP3]], i64* [[TMP33]], align 8 -// CHECK9-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK9-NEXT: store i64 8, i64* [[TMP34]], align 8 -// CHECK9-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 -// CHECK9-NEXT: store i8* null, i8** [[TMP35]], align 8 -// CHECK9-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK9-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i32** -// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP37]], align 8 -// CHECK9-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK9-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i32** -// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP39]], align 8 -// CHECK9-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK9-NEXT: store i64 [[TMP11]], i64* [[TMP40]], align 8 -// CHECK9-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 -// CHECK9-NEXT: store i8* null, i8** [[TMP41]], align 8 -// CHECK9-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP43:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP44:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP45:%.*]] = load i32, i32* [[N]], align 4 -// CHECK9-NEXT: store i32 [[TMP45]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK9-NEXT: [[TMP46:%.*]] = load i32, i32* [[M]], align 4 -// CHECK9-NEXT: store i32 [[TMP46]], i32* [[DOTCAPTURE_EXPR_3]], align 4 -// CHECK9-NEXT: [[TMP47:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP47]], 0 +// CHECK9-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK9-NEXT: store i8* null, i8** [[TMP26]], align 8 +// CHECK9-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK9-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i64* +// CHECK9-NEXT: store i64 [[TMP3]], i64* [[TMP28]], align 8 +// CHECK9-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK9-NEXT: [[TMP30:%.*]] = bitcast i8** [[TMP29]] to i64* 
+// CHECK9-NEXT: store i64 [[TMP3]], i64* [[TMP30]], align 8 +// CHECK9-NEXT: [[TMP31:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 +// CHECK9-NEXT: store i8* null, i8** [[TMP31]], align 8 +// CHECK9-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK9-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i32** +// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP33]], align 8 +// CHECK9-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK9-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i32** +// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP35]], align 8 +// CHECK9-NEXT: store i64 [[TMP11]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes, i32 0, i32 4), align 8 +// CHECK9-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 +// CHECK9-NEXT: store i8* null, i8** [[TMP36]], align 8 +// CHECK9-NEXT: [[TMP37:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP39:%.*]] = load i32, i32* [[N]], align 4 +// CHECK9-NEXT: store i32 [[TMP39]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK9-NEXT: [[TMP40:%.*]] = load i32, i32* [[M]], align 4 +// CHECK9-NEXT: store i32 [[TMP40]], i32* [[DOTCAPTURE_EXPR_3]], align 4 +// CHECK9-NEXT: [[TMP41:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP41]], 0 // CHECK9-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK9-NEXT: [[CONV5:%.*]] = sext i32 [[DIV]] to i64 -// CHECK9-NEXT: [[TMP48:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4 -// CHECK9-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP48]], 0 +// CHECK9-NEXT: [[TMP42:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4 +// CHECK9-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP42]], 0 // CHECK9-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1 // CHECK9-NEXT: [[CONV8:%.*]] = sext i32 [[DIV7]] to i64 // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i64 [[CONV5]], [[CONV8]] // CHECK9-NEXT: [[SUB9:%.*]] = sub nsw i64 [[MUL]], 1 // CHECK9-NEXT: store i64 [[SUB9]], i64* [[DOTCAPTURE_EXPR_4]], align 8 -// CHECK9-NEXT: [[TMP49:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_4]], align 8 -// CHECK9-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP49]], 1 +// CHECK9-NEXT: [[TMP43:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_4]], align 8 +// CHECK9-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP43]], 1 // CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[ADD]]) -// CHECK9-NEXT: [[TMP50:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l86.region_id, i32 5, i8** [[TMP42]], i8** [[TMP43]], i64* [[TMP44]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK9-NEXT: [[TMP51:%.*]] = icmp ne i32 [[TMP50]], 0 -// CHECK9-NEXT: br i1 [[TMP51]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK9-NEXT: [[TMP44:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l86.region_id, i32 5, i8** [[TMP37]], i8** [[TMP38]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([5 x 
i64], [5 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK9-NEXT: [[TMP45:%.*]] = icmp ne i32 [[TMP44]], 0 +// CHECK9-NEXT: br i1 [[TMP45]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK9: omp_offload.failed: // CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l86(i64 [[TMP7]], i64 [[TMP9]], i64 [[TMP1]], i64 [[TMP3]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -1443,10 +1432,10 @@ // CHECK9-NEXT: [[TMP52:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 // CHECK9-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10ELi2EEiT_(i32 noundef signext [[TMP52]]) // CHECK9-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK9-NEXT: [[TMP53:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK9-NEXT: call void @llvm.stackrestore(i8* [[TMP53]]) -// CHECK9-NEXT: [[TMP54:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK9-NEXT: ret i32 [[TMP54]] +// CHECK9-NEXT: [[TMP47:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK9-NEXT: call void @llvm.stackrestore(i8* [[TMP47]]) +// CHECK9-NEXT: [[TMP48:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK9-NEXT: ret i32 [[TMP48]] // // // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l86 @@ -1797,7 +1786,7 @@ // CHECK9-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK9-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 20) -// CHECK9-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l72.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK9-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l72.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK9-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK9-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK9: omp_offload.failed: @@ -2012,7 +2001,6 @@ // CHECK10-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 8 // CHECK10-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 8 // CHECK10-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 8 -// CHECK10-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 8 // CHECK10-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK10-NEXT: [[_TMP2:%.*]] = alloca i32, align 4 // CHECK10-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 @@ -2049,74 +2037,64 @@ // CHECK10-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK10-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64* // CHECK10-NEXT: store i64 [[TMP7]], i64* [[TMP15]], align 8 -// CHECK10-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* 
[[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK10-NEXT: store i64 4, i64* [[TMP16]], align 8 -// CHECK10-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK10-NEXT: store i8* null, i8** [[TMP17]], align 8 -// CHECK10-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK10-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i64* -// CHECK10-NEXT: store i64 [[TMP9]], i64* [[TMP19]], align 8 -// CHECK10-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK10-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i64* -// CHECK10-NEXT: store i64 [[TMP9]], i64* [[TMP21]], align 8 -// CHECK10-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK10-NEXT: store i64 4, i64* [[TMP22]], align 8 -// CHECK10-NEXT: [[TMP23:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK10-NEXT: store i8* null, i8** [[TMP23]], align 8 -// CHECK10-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK10-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK10-NEXT: store i8* null, i8** [[TMP16]], align 8 +// CHECK10-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK10-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i64* +// CHECK10-NEXT: store i64 [[TMP9]], i64* [[TMP18]], align 8 +// CHECK10-NEXT: [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK10-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i64* +// CHECK10-NEXT: store i64 [[TMP9]], i64* [[TMP20]], align 8 +// CHECK10-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK10-NEXT: store i8* null, i8** [[TMP21]], align 8 +// CHECK10-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK10-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i64* +// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP23]], align 8 +// CHECK10-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK10-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i64* // CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP25]], align 8 -// CHECK10-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK10-NEXT: [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i64* -// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP27]], align 8 -// CHECK10-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK10-NEXT: store i64 8, i64* [[TMP28]], align 8 -// CHECK10-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK10-NEXT: store i8* null, i8** [[TMP29]], align 8 -// CHECK10-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK10-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i64* -// CHECK10-NEXT: store i64 [[TMP3]], i64* [[TMP31]], align 8 -// CHECK10-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK10-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i64* -// CHECK10-NEXT: store i64 
[[TMP3]], i64* [[TMP33]], align 8 -// CHECK10-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK10-NEXT: store i64 8, i64* [[TMP34]], align 8 -// CHECK10-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 -// CHECK10-NEXT: store i8* null, i8** [[TMP35]], align 8 -// CHECK10-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK10-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i32** -// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP37]], align 8 -// CHECK10-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK10-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i32** -// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP39]], align 8 -// CHECK10-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK10-NEXT: store i64 [[TMP11]], i64* [[TMP40]], align 8 -// CHECK10-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 -// CHECK10-NEXT: store i8* null, i8** [[TMP41]], align 8 -// CHECK10-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP43:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP44:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP45:%.*]] = load i32, i32* [[N]], align 4 -// CHECK10-NEXT: store i32 [[TMP45]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK10-NEXT: [[TMP46:%.*]] = load i32, i32* [[M]], align 4 -// CHECK10-NEXT: store i32 [[TMP46]], i32* [[DOTCAPTURE_EXPR_3]], align 4 -// CHECK10-NEXT: [[TMP47:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK10-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP47]], 0 +// CHECK10-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK10-NEXT: store i8* null, i8** [[TMP26]], align 8 +// CHECK10-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK10-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i64* +// CHECK10-NEXT: store i64 [[TMP3]], i64* [[TMP28]], align 8 +// CHECK10-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK10-NEXT: [[TMP30:%.*]] = bitcast i8** [[TMP29]] to i64* +// CHECK10-NEXT: store i64 [[TMP3]], i64* [[TMP30]], align 8 +// CHECK10-NEXT: [[TMP31:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 +// CHECK10-NEXT: store i8* null, i8** [[TMP31]], align 8 +// CHECK10-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK10-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i32** +// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP33]], align 8 +// CHECK10-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK10-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i32** +// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP35]], align 8 +// CHECK10-NEXT: store i64 [[TMP11]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes, i32 0, i32 4), align 8 +// CHECK10-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 +// 
CHECK10-NEXT: store i8* null, i8** [[TMP36]], align 8 +// CHECK10-NEXT: [[TMP37:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP39:%.*]] = load i32, i32* [[N]], align 4 +// CHECK10-NEXT: store i32 [[TMP39]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK10-NEXT: [[TMP40:%.*]] = load i32, i32* [[M]], align 4 +// CHECK10-NEXT: store i32 [[TMP40]], i32* [[DOTCAPTURE_EXPR_3]], align 4 +// CHECK10-NEXT: [[TMP41:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK10-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP41]], 0 // CHECK10-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK10-NEXT: [[CONV5:%.*]] = sext i32 [[DIV]] to i64 -// CHECK10-NEXT: [[TMP48:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4 -// CHECK10-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP48]], 0 +// CHECK10-NEXT: [[TMP42:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4 +// CHECK10-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP42]], 0 // CHECK10-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1 // CHECK10-NEXT: [[CONV8:%.*]] = sext i32 [[DIV7]] to i64 // CHECK10-NEXT: [[MUL:%.*]] = mul nsw i64 [[CONV5]], [[CONV8]] // CHECK10-NEXT: [[SUB9:%.*]] = sub nsw i64 [[MUL]], 1 // CHECK10-NEXT: store i64 [[SUB9]], i64* [[DOTCAPTURE_EXPR_4]], align 8 -// CHECK10-NEXT: [[TMP49:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_4]], align 8 -// CHECK10-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP49]], 1 +// CHECK10-NEXT: [[TMP43:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_4]], align 8 +// CHECK10-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP43]], 1 // CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[ADD]]) -// CHECK10-NEXT: [[TMP50:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l86.region_id, i32 5, i8** [[TMP42]], i8** [[TMP43]], i64* [[TMP44]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK10-NEXT: [[TMP51:%.*]] = icmp ne i32 [[TMP50]], 0 -// CHECK10-NEXT: br i1 [[TMP51]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK10-NEXT: [[TMP44:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l86.region_id, i32 5, i8** [[TMP37]], i8** [[TMP38]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK10-NEXT: [[TMP45:%.*]] = icmp ne i32 [[TMP44]], 0 +// CHECK10-NEXT: br i1 [[TMP45]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK10: omp_offload.failed: // CHECK10-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l86(i64 [[TMP7]], i64 [[TMP9]], i64 [[TMP1]], i64 [[TMP3]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK10-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -2124,10 +2102,10 @@ // CHECK10-NEXT: [[TMP52:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 // CHECK10-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10ELi2EEiT_(i32 noundef signext [[TMP52]]) // CHECK10-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK10-NEXT: [[TMP53:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK10-NEXT: call void @llvm.stackrestore(i8* [[TMP53]]) -// CHECK10-NEXT: [[TMP54:%.*]] = 
load i32, i32* [[RETVAL]], align 4 -// CHECK10-NEXT: ret i32 [[TMP54]] +// CHECK10-NEXT: [[TMP47:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK10-NEXT: call void @llvm.stackrestore(i8* [[TMP47]]) +// CHECK10-NEXT: [[TMP48:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK10-NEXT: ret i32 [[TMP48]] // // // CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l86 @@ -2478,7 +2456,7 @@ // CHECK10-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK10-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 20) -// CHECK10-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l72.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK10-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l72.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK10-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK10-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK10: omp_offload.failed: @@ -2693,7 +2671,6 @@ // CHECK11-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 4 // CHECK11-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 4 // CHECK11-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 4 -// CHECK11-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 4 // CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK11-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 @@ -2727,74 +2704,64 @@ // CHECK11-NEXT: [[TMP13:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK11-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i32* // CHECK11-NEXT: store i32 [[TMP5]], i32* [[TMP14]], align 4 -// CHECK11-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK11-NEXT: store i64 4, i64* [[TMP15]], align 4 -// CHECK11-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK11-NEXT: store i8* null, i8** [[TMP16]], align 4 -// CHECK11-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK11-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32* -// CHECK11-NEXT: store i32 [[TMP7]], i32* [[TMP18]], align 4 -// CHECK11-NEXT: [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK11-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i32* -// CHECK11-NEXT: store i32 [[TMP7]], i32* [[TMP20]], align 4 -// CHECK11-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK11-NEXT: store i64 4, i64* [[TMP21]], align 4 
-// CHECK11-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK11-NEXT: store i8* null, i8** [[TMP22]], align 4 -// CHECK11-NEXT: [[TMP23:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK11-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK11-NEXT: store i8* null, i8** [[TMP15]], align 4 +// CHECK11-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK11-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32* +// CHECK11-NEXT: store i32 [[TMP7]], i32* [[TMP17]], align 4 +// CHECK11-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK11-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32* +// CHECK11-NEXT: store i32 [[TMP7]], i32* [[TMP19]], align 4 +// CHECK11-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK11-NEXT: store i8* null, i8** [[TMP20]], align 4 +// CHECK11-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK11-NEXT: [[TMP22:%.*]] = bitcast i8** [[TMP21]] to i32* +// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP22]], align 4 +// CHECK11-NEXT: [[TMP23:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK11-NEXT: [[TMP24:%.*]] = bitcast i8** [[TMP23]] to i32* // CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP24]], align 4 -// CHECK11-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK11-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i32* -// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP26]], align 4 -// CHECK11-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK11-NEXT: store i64 4, i64* [[TMP27]], align 4 -// CHECK11-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK11-NEXT: store i8* null, i8** [[TMP28]], align 4 -// CHECK11-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK11-NEXT: [[TMP30:%.*]] = bitcast i8** [[TMP29]] to i32* -// CHECK11-NEXT: store i32 [[TMP1]], i32* [[TMP30]], align 4 -// CHECK11-NEXT: [[TMP31:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK11-NEXT: [[TMP32:%.*]] = bitcast i8** [[TMP31]] to i32* -// CHECK11-NEXT: store i32 [[TMP1]], i32* [[TMP32]], align 4 -// CHECK11-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK11-NEXT: store i64 4, i64* [[TMP33]], align 4 -// CHECK11-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 -// CHECK11-NEXT: store i8* null, i8** [[TMP34]], align 4 -// CHECK11-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK11-NEXT: [[TMP36:%.*]] = bitcast i8** [[TMP35]] to i32** -// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP36]], align 4 -// CHECK11-NEXT: [[TMP37:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK11-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i32** -// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP38]], align 4 -// CHECK11-NEXT: [[TMP39:%.*]] = getelementptr 
inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK11-NEXT: store i64 [[TMP10]], i64* [[TMP39]], align 4 -// CHECK11-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 -// CHECK11-NEXT: store i8* null, i8** [[TMP40]], align 4 -// CHECK11-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP43:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP44:%.*]] = load i32, i32* [[N]], align 4 -// CHECK11-NEXT: store i32 [[TMP44]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK11-NEXT: [[TMP45:%.*]] = load i32, i32* [[M]], align 4 -// CHECK11-NEXT: store i32 [[TMP45]], i32* [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK11-NEXT: [[TMP46:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP46]], 0 +// CHECK11-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK11-NEXT: store i8* null, i8** [[TMP25]], align 4 +// CHECK11-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK11-NEXT: [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i32* +// CHECK11-NEXT: store i32 [[TMP1]], i32* [[TMP27]], align 4 +// CHECK11-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK11-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i32* +// CHECK11-NEXT: store i32 [[TMP1]], i32* [[TMP29]], align 4 +// CHECK11-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 +// CHECK11-NEXT: store i8* null, i8** [[TMP30]], align 4 +// CHECK11-NEXT: [[TMP31:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK11-NEXT: [[TMP32:%.*]] = bitcast i8** [[TMP31]] to i32** +// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP32]], align 4 +// CHECK11-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK11-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i32** +// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP34]], align 4 +// CHECK11-NEXT: store i64 [[TMP10]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes, i32 0, i32 4), align 4 +// CHECK11-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 +// CHECK11-NEXT: store i8* null, i8** [[TMP35]], align 4 +// CHECK11-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP37:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP38:%.*]] = load i32, i32* [[N]], align 4 +// CHECK11-NEXT: store i32 [[TMP38]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK11-NEXT: [[TMP39:%.*]] = load i32, i32* [[M]], align 4 +// CHECK11-NEXT: store i32 [[TMP39]], i32* [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK11-NEXT: [[TMP40:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP40]], 0 // CHECK11-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK11-NEXT: [[CONV:%.*]] = sext i32 [[DIV]] to i64 -// CHECK11-NEXT: [[TMP47:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK11-NEXT: [[SUB4:%.*]] = sub nsw 
i32 [[TMP47]], 0 +// CHECK11-NEXT: [[TMP41:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK11-NEXT: [[SUB4:%.*]] = sub nsw i32 [[TMP41]], 0 // CHECK11-NEXT: [[DIV5:%.*]] = sdiv i32 [[SUB4]], 1 // CHECK11-NEXT: [[CONV6:%.*]] = sext i32 [[DIV5]] to i64 // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i64 [[CONV]], [[CONV6]] // CHECK11-NEXT: [[SUB7:%.*]] = sub nsw i64 [[MUL]], 1 // CHECK11-NEXT: store i64 [[SUB7]], i64* [[DOTCAPTURE_EXPR_3]], align 8 -// CHECK11-NEXT: [[TMP48:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8 -// CHECK11-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP48]], 1 +// CHECK11-NEXT: [[TMP42:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8 +// CHECK11-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP42]], 1 // CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[ADD]]) -// CHECK11-NEXT: [[TMP49:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l86.region_id, i32 5, i8** [[TMP41]], i8** [[TMP42]], i64* [[TMP43]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK11-NEXT: [[TMP50:%.*]] = icmp ne i32 [[TMP49]], 0 -// CHECK11-NEXT: br i1 [[TMP50]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK11-NEXT: [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l86.region_id, i32 5, i8** [[TMP36]], i8** [[TMP37]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK11-NEXT: [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0 +// CHECK11-NEXT: br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK11: omp_offload.failed: // CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l86(i32 [[TMP5]], i32 [[TMP7]], i32 [[TMP0]], i32 [[TMP1]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -2802,10 +2769,10 @@ // CHECK11-NEXT: [[TMP51:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 // CHECK11-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10ELi2EEiT_(i32 noundef [[TMP51]]) // CHECK11-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK11-NEXT: [[TMP52:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK11-NEXT: call void @llvm.stackrestore(i8* [[TMP52]]) -// CHECK11-NEXT: [[TMP53:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK11-NEXT: ret i32 [[TMP53]] +// CHECK11-NEXT: [[TMP46:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK11-NEXT: call void @llvm.stackrestore(i8* [[TMP46]]) +// CHECK11-NEXT: [[TMP47:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK11-NEXT: ret i32 [[TMP47]] // // // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l86 @@ -3156,7 +3123,7 @@ // CHECK11-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK11-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 20) -// CHECK11-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* 
@.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l72.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK11-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l72.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK11-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK11-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK11: omp_offload.failed: @@ -3365,7 +3332,6 @@ // CHECK12-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 4 // CHECK12-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 4 // CHECK12-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 4 -// CHECK12-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 4 // CHECK12-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK12-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 // CHECK12-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 @@ -3399,74 +3365,64 @@ // CHECK12-NEXT: [[TMP13:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK12-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i32* // CHECK12-NEXT: store i32 [[TMP5]], i32* [[TMP14]], align 4 -// CHECK12-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK12-NEXT: store i64 4, i64* [[TMP15]], align 4 -// CHECK12-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK12-NEXT: store i8* null, i8** [[TMP16]], align 4 -// CHECK12-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK12-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32* -// CHECK12-NEXT: store i32 [[TMP7]], i32* [[TMP18]], align 4 -// CHECK12-NEXT: [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK12-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i32* -// CHECK12-NEXT: store i32 [[TMP7]], i32* [[TMP20]], align 4 -// CHECK12-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK12-NEXT: store i64 4, i64* [[TMP21]], align 4 -// CHECK12-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK12-NEXT: store i8* null, i8** [[TMP22]], align 4 -// CHECK12-NEXT: [[TMP23:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK12-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK12-NEXT: store i8* null, i8** [[TMP15]], align 4 +// CHECK12-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK12-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32* +// CHECK12-NEXT: store i32 [[TMP7]], i32* [[TMP17]], align 4 +// CHECK12-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK12-NEXT: [[TMP19:%.*]] = bitcast i8** 
[[TMP18]] to i32* +// CHECK12-NEXT: store i32 [[TMP7]], i32* [[TMP19]], align 4 +// CHECK12-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK12-NEXT: store i8* null, i8** [[TMP20]], align 4 +// CHECK12-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK12-NEXT: [[TMP22:%.*]] = bitcast i8** [[TMP21]] to i32* +// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP22]], align 4 +// CHECK12-NEXT: [[TMP23:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK12-NEXT: [[TMP24:%.*]] = bitcast i8** [[TMP23]] to i32* // CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP24]], align 4 -// CHECK12-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK12-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i32* -// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP26]], align 4 -// CHECK12-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK12-NEXT: store i64 4, i64* [[TMP27]], align 4 -// CHECK12-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK12-NEXT: store i8* null, i8** [[TMP28]], align 4 -// CHECK12-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK12-NEXT: [[TMP30:%.*]] = bitcast i8** [[TMP29]] to i32* -// CHECK12-NEXT: store i32 [[TMP1]], i32* [[TMP30]], align 4 -// CHECK12-NEXT: [[TMP31:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK12-NEXT: [[TMP32:%.*]] = bitcast i8** [[TMP31]] to i32* -// CHECK12-NEXT: store i32 [[TMP1]], i32* [[TMP32]], align 4 -// CHECK12-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK12-NEXT: store i64 4, i64* [[TMP33]], align 4 -// CHECK12-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 -// CHECK12-NEXT: store i8* null, i8** [[TMP34]], align 4 -// CHECK12-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK12-NEXT: [[TMP36:%.*]] = bitcast i8** [[TMP35]] to i32** -// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP36]], align 4 -// CHECK12-NEXT: [[TMP37:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK12-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i32** -// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP38]], align 4 -// CHECK12-NEXT: [[TMP39:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK12-NEXT: store i64 [[TMP10]], i64* [[TMP39]], align 4 -// CHECK12-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 -// CHECK12-NEXT: store i8* null, i8** [[TMP40]], align 4 -// CHECK12-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP43:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP44:%.*]] = load i32, i32* [[N]], align 4 -// CHECK12-NEXT: store i32 [[TMP44]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK12-NEXT: [[TMP45:%.*]] = load i32, i32* [[M]], align 4 -// CHECK12-NEXT: store 
i32 [[TMP45]], i32* [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK12-NEXT: [[TMP46:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK12-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP46]], 0 +// CHECK12-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK12-NEXT: store i8* null, i8** [[TMP25]], align 4 +// CHECK12-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK12-NEXT: [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i32* +// CHECK12-NEXT: store i32 [[TMP1]], i32* [[TMP27]], align 4 +// CHECK12-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK12-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i32* +// CHECK12-NEXT: store i32 [[TMP1]], i32* [[TMP29]], align 4 +// CHECK12-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 +// CHECK12-NEXT: store i8* null, i8** [[TMP30]], align 4 +// CHECK12-NEXT: [[TMP31:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK12-NEXT: [[TMP32:%.*]] = bitcast i8** [[TMP31]] to i32** +// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP32]], align 4 +// CHECK12-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK12-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i32** +// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP34]], align 4 +// CHECK12-NEXT: store i64 [[TMP10]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes, i32 0, i32 4), align 4 +// CHECK12-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 +// CHECK12-NEXT: store i8* null, i8** [[TMP35]], align 4 +// CHECK12-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP37:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP38:%.*]] = load i32, i32* [[N]], align 4 +// CHECK12-NEXT: store i32 [[TMP38]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK12-NEXT: [[TMP39:%.*]] = load i32, i32* [[M]], align 4 +// CHECK12-NEXT: store i32 [[TMP39]], i32* [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK12-NEXT: [[TMP40:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK12-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP40]], 0 // CHECK12-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK12-NEXT: [[CONV:%.*]] = sext i32 [[DIV]] to i64 -// CHECK12-NEXT: [[TMP47:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK12-NEXT: [[SUB4:%.*]] = sub nsw i32 [[TMP47]], 0 +// CHECK12-NEXT: [[TMP41:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK12-NEXT: [[SUB4:%.*]] = sub nsw i32 [[TMP41]], 0 // CHECK12-NEXT: [[DIV5:%.*]] = sdiv i32 [[SUB4]], 1 // CHECK12-NEXT: [[CONV6:%.*]] = sext i32 [[DIV5]] to i64 // CHECK12-NEXT: [[MUL:%.*]] = mul nsw i64 [[CONV]], [[CONV6]] // CHECK12-NEXT: [[SUB7:%.*]] = sub nsw i64 [[MUL]], 1 // CHECK12-NEXT: store i64 [[SUB7]], i64* [[DOTCAPTURE_EXPR_3]], align 8 -// CHECK12-NEXT: [[TMP48:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8 -// CHECK12-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP48]], 1 +// CHECK12-NEXT: [[TMP42:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8 +// CHECK12-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP42]], 1 // CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[ADD]]) -// CHECK12-NEXT: 
[[TMP49:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l86.region_id, i32 5, i8** [[TMP41]], i8** [[TMP42]], i64* [[TMP43]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK12-NEXT: [[TMP50:%.*]] = icmp ne i32 [[TMP49]], 0 -// CHECK12-NEXT: br i1 [[TMP50]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK12-NEXT: [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l86.region_id, i32 5, i8** [[TMP36]], i8** [[TMP37]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK12-NEXT: [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0 +// CHECK12-NEXT: br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK12: omp_offload.failed: // CHECK12-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l86(i32 [[TMP5]], i32 [[TMP7]], i32 [[TMP0]], i32 [[TMP1]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK12-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -3474,10 +3430,10 @@ // CHECK12-NEXT: [[TMP51:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 // CHECK12-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10ELi2EEiT_(i32 noundef [[TMP51]]) // CHECK12-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK12-NEXT: [[TMP52:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK12-NEXT: call void @llvm.stackrestore(i8* [[TMP52]]) -// CHECK12-NEXT: [[TMP53:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK12-NEXT: ret i32 [[TMP53]] +// CHECK12-NEXT: [[TMP46:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK12-NEXT: call void @llvm.stackrestore(i8* [[TMP46]]) +// CHECK12-NEXT: [[TMP47:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK12-NEXT: ret i32 [[TMP47]] // // // CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l86 @@ -3828,7 +3784,7 @@ // CHECK12-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK12-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 20) -// CHECK12-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l72.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK12-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l72.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK12-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK12-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK12: 
omp_offload.failed: diff --git a/clang/test/OpenMP/teams_distribute_parallel_for_simd_dist_schedule_codegen.cpp b/clang/test/OpenMP/teams_distribute_parallel_for_simd_dist_schedule_codegen.cpp --- a/clang/test/OpenMP/teams_distribute_parallel_for_simd_dist_schedule_codegen.cpp +++ b/clang/test/OpenMP/teams_distribute_parallel_for_simd_dist_schedule_codegen.cpp @@ -3068,7 +3068,6 @@ // CHECK9-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK9-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK9-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8 -// CHECK9-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 8 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -3076,19 +3075,17 @@ // CHECK9-NEXT: [[DOTOFFLOAD_BASEPTRS5:%.*]] = alloca [3 x i8*], align 8 // CHECK9-NEXT: [[DOTOFFLOAD_PTRS6:%.*]] = alloca [3 x i8*], align 8 // CHECK9-NEXT: [[DOTOFFLOAD_MAPPERS7:%.*]] = alloca [3 x i8*], align 8 -// CHECK9-NEXT: [[DOTOFFLOAD_SIZES8:%.*]] = alloca [3 x i64], align 8 -// CHECK9-NEXT: [[_TMP9:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[_TMP8:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTCAPTURE_EXPR_11:%.*]] = alloca i32, align 4 // CHECK9-NEXT: [[M_CASTED:%.*]] = alloca i64, align 8 -// CHECK9-NEXT: [[N_CASTED19:%.*]] = alloca i64, align 8 -// CHECK9-NEXT: [[DOTOFFLOAD_BASEPTRS21:%.*]] = alloca [4 x i8*], align 8 -// CHECK9-NEXT: [[DOTOFFLOAD_PTRS22:%.*]] = alloca [4 x i8*], align 8 -// CHECK9-NEXT: [[DOTOFFLOAD_MAPPERS23:%.*]] = alloca [4 x i8*], align 8 -// CHECK9-NEXT: [[DOTOFFLOAD_SIZES24:%.*]] = alloca [4 x i64], align 8 -// CHECK9-NEXT: [[_TMP25:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTCAPTURE_EXPR_26:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTCAPTURE_EXPR_27:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[N_CASTED18:%.*]] = alloca i64, align 8 +// CHECK9-NEXT: [[DOTOFFLOAD_BASEPTRS20:%.*]] = alloca [4 x i8*], align 8 +// CHECK9-NEXT: [[DOTOFFLOAD_PTRS21:%.*]] = alloca [4 x i8*], align 8 +// CHECK9-NEXT: [[DOTOFFLOAD_MAPPERS22:%.*]] = alloca [4 x i8*], align 8 +// CHECK9-NEXT: [[_TMP23:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTCAPTURE_EXPR_24:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTCAPTURE_EXPR_25:%.*]] = alloca i32, align 4 // CHECK9-NEXT: store i32 0, i32* [[RETVAL]], align 4 // CHECK9-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 // CHECK9-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 8 @@ -3111,184 +3108,164 @@ // CHECK9-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK9-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i64* // CHECK9-NEXT: store i64 [[TMP4]], i64* [[TMP9]], align 8 -// CHECK9-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK9-NEXT: store i64 4, i64* [[TMP10]], align 8 -// CHECK9-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK9-NEXT: store i8* null, i8** [[TMP11]], align 8 -// CHECK9-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK9-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64* -// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP13]], align 8 -// CHECK9-NEXT: 
[[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK9-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64* -// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP15]], align 8 -// CHECK9-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK9-NEXT: store i64 8, i64* [[TMP16]], align 8 -// CHECK9-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK9-NEXT: store i8* null, i8** [[TMP17]], align 8 -// CHECK9-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK9-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK9-NEXT: store i8* null, i8** [[TMP10]], align 8 +// CHECK9-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK9-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i64* +// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP12]], align 8 +// CHECK9-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK9-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i64* +// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP14]], align 8 +// CHECK9-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK9-NEXT: store i8* null, i8** [[TMP15]], align 8 +// CHECK9-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK9-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** +// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 8 +// CHECK9-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK9-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** // CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 8 -// CHECK9-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK9-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** -// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 8 -// CHECK9-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK9-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 8 -// CHECK9-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK9-NEXT: store i8* null, i8** [[TMP23]], align 8 -// CHECK9-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4 -// CHECK9-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK9-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 +// CHECK9-NEXT: store i64 [[TMP5]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 2), align 8 +// CHECK9-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK9-NEXT: store i8* null, i8** [[TMP20]], align 8 +// CHECK9-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* 
[[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP23:%.*]] = load i32, i32* [[N]], align 4 +// CHECK9-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK9-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK9-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK9-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK9-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK9-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1 -// CHECK9-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 -// CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP30]]) -// CHECK9-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l108.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK9-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 -// CHECK9-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK9-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], 1 +// CHECK9-NEXT: [[TMP26:%.*]] = zext i32 [[ADD]] to i64 +// CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP26]]) +// CHECK9-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l108.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK9-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK9-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK9: omp_offload.failed: // CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l108(i64 [[TMP4]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK9: omp_offload.cont: -// CHECK9-NEXT: [[TMP33:%.*]] = load i32, i32* [[N]], align 4 +// CHECK9-NEXT: [[TMP29:%.*]] = load i32, i32* [[N]], align 4 // CHECK9-NEXT: [[CONV4:%.*]] = bitcast i64* [[N_CASTED3]] to i32* -// CHECK9-NEXT: store i32 [[TMP33]], i32* [[CONV4]], align 4 -// CHECK9-NEXT: [[TMP34:%.*]] = load i64, i64* [[N_CASTED3]], align 8 -// CHECK9-NEXT: [[TMP35:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK9-NEXT: [[TMP36:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i64* -// CHECK9-NEXT: store i64 [[TMP34]], i64* [[TMP37]], align 8 -// CHECK9-NEXT: [[TMP38:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i64* -// CHECK9-NEXT: store i64 [[TMP34]], i64* [[TMP39]], align 8 -// CHECK9-NEXT: [[TMP40:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 0 -// CHECK9-NEXT: store 
i64 4, i64* [[TMP40]], align 8 -// CHECK9-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 0 +// CHECK9-NEXT: store i32 [[TMP29]], i32* [[CONV4]], align 4 +// CHECK9-NEXT: [[TMP30:%.*]] = load i64, i64* [[N_CASTED3]], align 8 +// CHECK9-NEXT: [[TMP31:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK9-NEXT: [[TMP32:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i64* +// CHECK9-NEXT: store i64 [[TMP30]], i64* [[TMP33]], align 8 +// CHECK9-NEXT: [[TMP34:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i64* +// CHECK9-NEXT: store i64 [[TMP30]], i64* [[TMP35]], align 8 +// CHECK9-NEXT: [[TMP36:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 0 +// CHECK9-NEXT: store i8* null, i8** [[TMP36]], align 8 +// CHECK9-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1 +// CHECK9-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i64* +// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP38]], align 8 +// CHECK9-NEXT: [[TMP39:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 1 +// CHECK9-NEXT: [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i64* +// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP40]], align 8 +// CHECK9-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 1 // CHECK9-NEXT: store i8* null, i8** [[TMP41]], align 8 -// CHECK9-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1 -// CHECK9-NEXT: [[TMP43:%.*]] = bitcast i8** [[TMP42]] to i64* -// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP43]], align 8 -// CHECK9-NEXT: [[TMP44:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 1 -// CHECK9-NEXT: [[TMP45:%.*]] = bitcast i8** [[TMP44]] to i64* -// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP45]], align 8 -// CHECK9-NEXT: [[TMP46:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 1 -// CHECK9-NEXT: store i64 8, i64* [[TMP46]], align 8 -// CHECK9-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 1 -// CHECK9-NEXT: store i8* null, i8** [[TMP47]], align 8 -// CHECK9-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 2 -// CHECK9-NEXT: [[TMP49:%.*]] = bitcast i8** [[TMP48]] to i32** -// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP49]], align 8 -// CHECK9-NEXT: [[TMP50:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 2 -// CHECK9-NEXT: [[TMP51:%.*]] = bitcast i8** [[TMP50]] to i32** -// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP51]], align 8 -// CHECK9-NEXT: [[TMP52:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 2 -// CHECK9-NEXT: store i64 [[TMP35]], i64* [[TMP52]], align 8 -// CHECK9-NEXT: [[TMP53:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 2 -// CHECK9-NEXT: store i8* null, i8** [[TMP53]], align 8 -// CHECK9-NEXT: [[TMP54:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP55:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 -// CHECK9-NEXT: 
[[TMP56:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 2 +// CHECK9-NEXT: [[TMP43:%.*]] = bitcast i8** [[TMP42]] to i32** +// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP43]], align 8 +// CHECK9-NEXT: [[TMP44:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 2 +// CHECK9-NEXT: [[TMP45:%.*]] = bitcast i8** [[TMP44]] to i32** +// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP45]], align 8 +// CHECK9-NEXT: store i64 [[TMP31]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 2), align 8 +// CHECK9-NEXT: [[TMP46:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 2 +// CHECK9-NEXT: store i8* null, i8** [[TMP46]], align 8 +// CHECK9-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP49:%.*]] = load i32, i32* [[N]], align 4 +// CHECK9-NEXT: store i32 [[TMP49]], i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK9-NEXT: [[TMP50:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK9-NEXT: [[SUB11:%.*]] = sub nsw i32 [[TMP50]], 0 +// CHECK9-NEXT: [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1 +// CHECK9-NEXT: [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1 +// CHECK9-NEXT: store i32 [[SUB13]], i32* [[DOTCAPTURE_EXPR_10]], align 4 +// CHECK9-NEXT: [[TMP51:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 +// CHECK9-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP51]], 1 +// CHECK9-NEXT: [[TMP52:%.*]] = zext i32 [[ADD14]] to i64 +// CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP52]]) +// CHECK9-NEXT: [[TMP53:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l113.region_id, i32 3, i8** [[TMP47]], i8** [[TMP48]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK9-NEXT: [[TMP54:%.*]] = icmp ne i32 [[TMP53]], 0 +// CHECK9-NEXT: br i1 [[TMP54]], label [[OMP_OFFLOAD_FAILED15:%.*]], label [[OMP_OFFLOAD_CONT16:%.*]] +// CHECK9: omp_offload.failed15: +// CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l113(i64 [[TMP30]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] +// CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT16]] +// CHECK9: omp_offload.cont16: +// CHECK9-NEXT: [[TMP55:%.*]] = load i32, i32* [[M]], align 4 +// CHECK9-NEXT: [[CONV17:%.*]] = bitcast i64* [[M_CASTED]] to i32* +// CHECK9-NEXT: store i32 [[TMP55]], i32* [[CONV17]], align 4 +// CHECK9-NEXT: [[TMP56:%.*]] = load i64, i64* [[M_CASTED]], align 8 // CHECK9-NEXT: [[TMP57:%.*]] = load i32, i32* [[N]], align 4 -// CHECK9-NEXT: store i32 [[TMP57]], i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK9-NEXT: [[TMP58:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK9-NEXT: [[SUB12:%.*]] = sub nsw i32 [[TMP58]], 0 -// CHECK9-NEXT: [[DIV13:%.*]] = sdiv i32 [[SUB12]], 1 -// CHECK9-NEXT: [[SUB14:%.*]] = sub nsw i32 [[DIV13]], 1 -// CHECK9-NEXT: store i32 [[SUB14]], i32* [[DOTCAPTURE_EXPR_11]], align 4 -// CHECK9-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_11]], align 4 -// CHECK9-NEXT: 
[[ADD15:%.*]] = add nsw i32 [[TMP59]], 1 -// CHECK9-NEXT: [[TMP60:%.*]] = zext i32 [[ADD15]] to i64 -// CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP60]]) -// CHECK9-NEXT: [[TMP61:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l113.region_id, i32 3, i8** [[TMP54]], i8** [[TMP55]], i64* [[TMP56]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK9-NEXT: [[TMP62:%.*]] = icmp ne i32 [[TMP61]], 0 -// CHECK9-NEXT: br i1 [[TMP62]], label [[OMP_OFFLOAD_FAILED16:%.*]], label [[OMP_OFFLOAD_CONT17:%.*]] -// CHECK9: omp_offload.failed16: -// CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l113(i64 [[TMP34]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] -// CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT17]] -// CHECK9: omp_offload.cont17: -// CHECK9-NEXT: [[TMP63:%.*]] = load i32, i32* [[M]], align 4 -// CHECK9-NEXT: [[CONV18:%.*]] = bitcast i64* [[M_CASTED]] to i32* -// CHECK9-NEXT: store i32 [[TMP63]], i32* [[CONV18]], align 4 -// CHECK9-NEXT: [[TMP64:%.*]] = load i64, i64* [[M_CASTED]], align 8 -// CHECK9-NEXT: [[TMP65:%.*]] = load i32, i32* [[N]], align 4 -// CHECK9-NEXT: [[CONV20:%.*]] = bitcast i64* [[N_CASTED19]] to i32* -// CHECK9-NEXT: store i32 [[TMP65]], i32* [[CONV20]], align 4 -// CHECK9-NEXT: [[TMP66:%.*]] = load i64, i64* [[N_CASTED19]], align 8 -// CHECK9-NEXT: [[TMP67:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK9-NEXT: [[TMP68:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP69:%.*]] = bitcast i8** [[TMP68]] to i64* -// CHECK9-NEXT: store i64 [[TMP64]], i64* [[TMP69]], align 8 -// CHECK9-NEXT: [[TMP70:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0 +// CHECK9-NEXT: [[CONV19:%.*]] = bitcast i64* [[N_CASTED18]] to i32* +// CHECK9-NEXT: store i32 [[TMP57]], i32* [[CONV19]], align 4 +// CHECK9-NEXT: [[TMP58:%.*]] = load i64, i64* [[N_CASTED18]], align 8 +// CHECK9-NEXT: [[TMP59:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK9-NEXT: [[TMP60:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP61:%.*]] = bitcast i8** [[TMP60]] to i64* +// CHECK9-NEXT: store i64 [[TMP56]], i64* [[TMP61]], align 8 +// CHECK9-NEXT: [[TMP62:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP63:%.*]] = bitcast i8** [[TMP62]] to i64* +// CHECK9-NEXT: store i64 [[TMP56]], i64* [[TMP63]], align 8 +// CHECK9-NEXT: [[TMP64:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 0 +// CHECK9-NEXT: store i8* null, i8** [[TMP64]], align 8 +// CHECK9-NEXT: [[TMP65:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 1 +// CHECK9-NEXT: [[TMP66:%.*]] = bitcast i8** [[TMP65]] to i64* +// CHECK9-NEXT: store i64 [[TMP58]], i64* [[TMP66]], align 8 +// CHECK9-NEXT: [[TMP67:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 1 +// CHECK9-NEXT: [[TMP68:%.*]] = bitcast i8** [[TMP67]] to i64* +// CHECK9-NEXT: store i64 [[TMP58]], i64* [[TMP68]], align 8 +// CHECK9-NEXT: [[TMP69:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 1 +// CHECK9-NEXT: store i8* null, i8** [[TMP69]], align 8 +// CHECK9-NEXT: [[TMP70:%.*]] = getelementptr 
inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 2 // CHECK9-NEXT: [[TMP71:%.*]] = bitcast i8** [[TMP70]] to i64* -// CHECK9-NEXT: store i64 [[TMP64]], i64* [[TMP71]], align 8 -// CHECK9-NEXT: [[TMP72:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES24]], i32 0, i32 0 -// CHECK9-NEXT: store i64 4, i64* [[TMP72]], align 8 -// CHECK9-NEXT: [[TMP73:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 0 -// CHECK9-NEXT: store i8* null, i8** [[TMP73]], align 8 -// CHECK9-NEXT: [[TMP74:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 1 -// CHECK9-NEXT: [[TMP75:%.*]] = bitcast i8** [[TMP74]] to i64* -// CHECK9-NEXT: store i64 [[TMP66]], i64* [[TMP75]], align 8 -// CHECK9-NEXT: [[TMP76:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 1 -// CHECK9-NEXT: [[TMP77:%.*]] = bitcast i8** [[TMP76]] to i64* -// CHECK9-NEXT: store i64 [[TMP66]], i64* [[TMP77]], align 8 -// CHECK9-NEXT: [[TMP78:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES24]], i32 0, i32 1 -// CHECK9-NEXT: store i64 4, i64* [[TMP78]], align 8 -// CHECK9-NEXT: [[TMP79:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 1 +// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP71]], align 8 +// CHECK9-NEXT: [[TMP72:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 2 +// CHECK9-NEXT: [[TMP73:%.*]] = bitcast i8** [[TMP72]] to i64* +// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP73]], align 8 +// CHECK9-NEXT: [[TMP74:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 2 +// CHECK9-NEXT: store i8* null, i8** [[TMP74]], align 8 +// CHECK9-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 3 +// CHECK9-NEXT: [[TMP76:%.*]] = bitcast i8** [[TMP75]] to i32** +// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP76]], align 8 +// CHECK9-NEXT: [[TMP77:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 3 +// CHECK9-NEXT: [[TMP78:%.*]] = bitcast i8** [[TMP77]] to i32** +// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP78]], align 8 +// CHECK9-NEXT: store i64 [[TMP59]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 3), align 8 +// CHECK9-NEXT: [[TMP79:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 3 // CHECK9-NEXT: store i8* null, i8** [[TMP79]], align 8 -// CHECK9-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 2 -// CHECK9-NEXT: [[TMP81:%.*]] = bitcast i8** [[TMP80]] to i64* -// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP81]], align 8 -// CHECK9-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 2 -// CHECK9-NEXT: [[TMP83:%.*]] = bitcast i8** [[TMP82]] to i64* -// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP83]], align 8 -// CHECK9-NEXT: [[TMP84:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES24]], i32 0, i32 2 -// CHECK9-NEXT: store i64 8, i64* [[TMP84]], align 8 -// CHECK9-NEXT: [[TMP85:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 2 -// CHECK9-NEXT: store i8* null, i8** [[TMP85]], align 8 -// CHECK9-NEXT: [[TMP86:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 3 -// CHECK9-NEXT: [[TMP87:%.*]] = 
bitcast i8** [[TMP86]] to i32** -// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP87]], align 8 -// CHECK9-NEXT: [[TMP88:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 3 -// CHECK9-NEXT: [[TMP89:%.*]] = bitcast i8** [[TMP88]] to i32** -// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP89]], align 8 -// CHECK9-NEXT: [[TMP90:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES24]], i32 0, i32 3 -// CHECK9-NEXT: store i64 [[TMP67]], i64* [[TMP90]], align 8 -// CHECK9-NEXT: [[TMP91:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 3 -// CHECK9-NEXT: store i8* null, i8** [[TMP91]], align 8 -// CHECK9-NEXT: [[TMP92:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP93:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP94:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES24]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP95:%.*]] = load i32, i32* [[N]], align 4 -// CHECK9-NEXT: store i32 [[TMP95]], i32* [[DOTCAPTURE_EXPR_26]], align 4 -// CHECK9-NEXT: [[TMP96:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_26]], align 4 -// CHECK9-NEXT: [[SUB28:%.*]] = sub nsw i32 [[TMP96]], 0 -// CHECK9-NEXT: [[DIV29:%.*]] = sdiv i32 [[SUB28]], 1 -// CHECK9-NEXT: [[SUB30:%.*]] = sub nsw i32 [[DIV29]], 1 -// CHECK9-NEXT: store i32 [[SUB30]], i32* [[DOTCAPTURE_EXPR_27]], align 4 -// CHECK9-NEXT: [[TMP97:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_27]], align 4 -// CHECK9-NEXT: [[ADD31:%.*]] = add nsw i32 [[TMP97]], 1 -// CHECK9-NEXT: [[TMP98:%.*]] = zext i32 [[ADD31]] to i64 -// CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP98]]) -// CHECK9-NEXT: [[TMP99:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l118.region_id, i32 4, i8** [[TMP92]], i8** [[TMP93]], i64* [[TMP94]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.7, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK9-NEXT: [[TMP100:%.*]] = icmp ne i32 [[TMP99]], 0 -// CHECK9-NEXT: br i1 [[TMP100]], label [[OMP_OFFLOAD_FAILED32:%.*]], label [[OMP_OFFLOAD_CONT33:%.*]] -// CHECK9: omp_offload.failed32: -// CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l118(i64 [[TMP64]], i64 [[TMP66]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] -// CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT33]] -// CHECK9: omp_offload.cont33: -// CHECK9-NEXT: [[TMP101:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 -// CHECK9-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP101]]) +// CHECK9-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP82:%.*]] = load i32, i32* [[N]], align 4 +// CHECK9-NEXT: store i32 [[TMP82]], i32* [[DOTCAPTURE_EXPR_24]], align 4 +// CHECK9-NEXT: [[TMP83:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_24]], align 4 +// CHECK9-NEXT: [[SUB26:%.*]] = sub nsw i32 [[TMP83]], 0 +// CHECK9-NEXT: [[DIV27:%.*]] = sdiv i32 [[SUB26]], 1 +// CHECK9-NEXT: [[SUB28:%.*]] = sub nsw i32 [[DIV27]], 1 +// CHECK9-NEXT: store i32 [[SUB28]], i32* [[DOTCAPTURE_EXPR_25]], align 4 +// CHECK9-NEXT: [[TMP84:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_25]], align 4 +// 
CHECK9-NEXT: [[ADD29:%.*]] = add nsw i32 [[TMP84]], 1 +// CHECK9-NEXT: [[TMP85:%.*]] = zext i32 [[ADD29]] to i64 +// CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP85]]) +// CHECK9-NEXT: [[TMP86:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l118.region_id, i32 4, i8** [[TMP80]], i8** [[TMP81]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK9-NEXT: [[TMP87:%.*]] = icmp ne i32 [[TMP86]], 0 +// CHECK9-NEXT: br i1 [[TMP87]], label [[OMP_OFFLOAD_FAILED30:%.*]], label [[OMP_OFFLOAD_CONT31:%.*]] +// CHECK9: omp_offload.failed30: +// CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l118(i64 [[TMP56]], i64 [[TMP58]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] +// CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT31]] +// CHECK9: omp_offload.cont31: +// CHECK9-NEXT: [[TMP88:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 +// CHECK9-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP88]]) // CHECK9-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK9-NEXT: [[TMP102:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK9-NEXT: call void @llvm.stackrestore(i8* [[TMP102]]) -// CHECK9-NEXT: [[TMP103:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK9-NEXT: ret i32 [[TMP103]] +// CHECK9-NEXT: [[TMP89:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK9-NEXT: call void @llvm.stackrestore(i8* [[TMP89]]) +// CHECK9-NEXT: [[TMP90:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK9-NEXT: ret i32 [[TMP90]] // // // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l108 @@ -3786,11 +3763,11 @@ // CHECK9-NEXT: [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK9-NEXT: store i32 [[TMP3]], i32* [[CONV2]], align 4 // CHECK9-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i64, i32*, i64)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i32* [[CONV1]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP4]]) +// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i64, i32*, i64)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32* [[CONV1]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP4]]) // CHECK9-NEXT: ret void // // -// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..5 +// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..6 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK9-NEXT: entry: // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -3872,7 +3849,7 @@ // CHECK9-NEXT: [[CONV7:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK9-NEXT: store i32 [[TMP21]], i32* [[CONV7]], align 4, !llvm.access.group !24 // CHECK9-NEXT: [[TMP22:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !24 -// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64, i32*, i64)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], i64 [[TMP1]], i32* [[TMP2]], i64 [[TMP22]]), !llvm.access.group !24 +// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64, i32*, i64)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], i64 [[TMP1]], i32* [[TMP2]], i64 [[TMP22]]), !llvm.access.group !24 // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK9: omp.inner.for.inc: // CHECK9-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24 @@ -3926,7 +3903,7 @@ // CHECK9-NEXT: ret void // // -// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..6 +// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..7 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK9-NEXT: entry: // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -4078,7 +4055,7 @@ // CHECK9-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK9-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK9-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l86.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK9-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* 
@.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l86.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK9-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK9-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK9: omp_offload.failed: @@ -4096,7 +4073,7 @@ // CHECK9-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0 // CHECK9-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0 // CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK9-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l91.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.13, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.14, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK9-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l91.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.16, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK9-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 // CHECK9-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]] // CHECK9: omp_offload.failed5: @@ -4126,7 +4103,7 @@ // CHECK9-NEXT: [[TMP30:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0 // CHECK9-NEXT: [[TMP31:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0 // CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK9-NEXT: [[TMP32:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l96.region_id, i32 2, i8** [[TMP30]], i8** [[TMP31]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.17, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.18, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK9-NEXT: [[TMP32:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l96.region_id, i32 2, i8** [[TMP30]], i8** [[TMP31]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.20, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.21, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK9-NEXT: [[TMP33:%.*]] = icmp ne i32 [[TMP32]], 0 // CHECK9-NEXT: br i1 [[TMP33]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]] // CHECK9: omp_offload.failed11: @@ -4142,11 +4119,11 @@ // CHECK9-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK9-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 // CHECK9-NEXT: [[TMP0:%.*]] = load 
[10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 -// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..8 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK9-NEXT: ret void // // -// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..8 +// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..10 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK9-NEXT: entry: // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -4194,7 +4171,7 @@ // CHECK9-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 // CHECK9-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !30 // CHECK9-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]), !llvm.access.group !30 +// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]), !llvm.access.group !30 // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK9: omp.inner.for.inc: // CHECK9-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !30 @@ -4216,7 +4193,7 @@ // CHECK9-NEXT: ret void // // -// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..9 +// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..11 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK9-NEXT: entry: // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -4306,11 +4283,11 @@ // CHECK9-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK9-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 // CHECK9-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 -// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK9-NEXT: ret void // // -// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..14 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK9-NEXT: entry: // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -4358,7 +4335,7 @@ // CHECK9-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 // CHECK9-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !36 // CHECK9-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..12 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]), !llvm.access.group !36 +// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]), !llvm.access.group !36 // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK9: omp.inner.for.inc: // CHECK9-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36 @@ -4380,7 +4357,7 @@ // CHECK9-NEXT: ret void // // -// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..12 +// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..15 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK9-NEXT: entry: // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -4481,11 +4458,11 @@ // CHECK9-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK9-NEXT: store i32 [[TMP2]], i32* [[CONV1]], align 4 // CHECK9-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..15 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP3]]) +// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..18 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP3]]) // CHECK9-NEXT: ret void // // -// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..15 +// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..18 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK9-NEXT: entry: // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -4541,7 +4518,7 @@ // CHECK9-NEXT: [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK9-NEXT: store i32 [[TMP12]], i32* [[CONV2]], align 4, !llvm.access.group !42 // CHECK9-NEXT: [[TMP13:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !42 -// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]]), !llvm.access.group !42 +// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..19 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]]), !llvm.access.group !42 // CHECK9-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK9: omp.inner.for.inc: // CHECK9-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !42 @@ -4584,7 +4561,7 @@ // CHECK9-NEXT: ret void // // -// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..16 +// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..19 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK9-NEXT: entry: // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -4692,7 +4669,6 @@ // CHECK10-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK10-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK10-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8 -// CHECK10-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 8 // CHECK10-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK10-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK10-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -4700,19 +4676,17 @@ // CHECK10-NEXT: [[DOTOFFLOAD_BASEPTRS5:%.*]] = alloca [3 x i8*], align 8 // CHECK10-NEXT: [[DOTOFFLOAD_PTRS6:%.*]] = alloca [3 x i8*], align 8 // CHECK10-NEXT: [[DOTOFFLOAD_MAPPERS7:%.*]] = alloca [3 x i8*], align 8 -// CHECK10-NEXT: [[DOTOFFLOAD_SIZES8:%.*]] = alloca [3 x i64], align 8 -// CHECK10-NEXT: [[_TMP9:%.*]] = alloca i32, align 4 +// CHECK10-NEXT: [[_TMP8:%.*]] = alloca i32, align 4 +// CHECK10-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4 // CHECK10-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4 -// CHECK10-NEXT: [[DOTCAPTURE_EXPR_11:%.*]] = alloca i32, align 4 // CHECK10-NEXT: [[M_CASTED:%.*]] = alloca i64, align 8 -// 
CHECK10-NEXT: [[N_CASTED19:%.*]] = alloca i64, align 8 -// CHECK10-NEXT: [[DOTOFFLOAD_BASEPTRS21:%.*]] = alloca [4 x i8*], align 8 -// CHECK10-NEXT: [[DOTOFFLOAD_PTRS22:%.*]] = alloca [4 x i8*], align 8 -// CHECK10-NEXT: [[DOTOFFLOAD_MAPPERS23:%.*]] = alloca [4 x i8*], align 8 -// CHECK10-NEXT: [[DOTOFFLOAD_SIZES24:%.*]] = alloca [4 x i64], align 8 -// CHECK10-NEXT: [[_TMP25:%.*]] = alloca i32, align 4 -// CHECK10-NEXT: [[DOTCAPTURE_EXPR_26:%.*]] = alloca i32, align 4 -// CHECK10-NEXT: [[DOTCAPTURE_EXPR_27:%.*]] = alloca i32, align 4 +// CHECK10-NEXT: [[N_CASTED18:%.*]] = alloca i64, align 8 +// CHECK10-NEXT: [[DOTOFFLOAD_BASEPTRS20:%.*]] = alloca [4 x i8*], align 8 +// CHECK10-NEXT: [[DOTOFFLOAD_PTRS21:%.*]] = alloca [4 x i8*], align 8 +// CHECK10-NEXT: [[DOTOFFLOAD_MAPPERS22:%.*]] = alloca [4 x i8*], align 8 +// CHECK10-NEXT: [[_TMP23:%.*]] = alloca i32, align 4 +// CHECK10-NEXT: [[DOTCAPTURE_EXPR_24:%.*]] = alloca i32, align 4 +// CHECK10-NEXT: [[DOTCAPTURE_EXPR_25:%.*]] = alloca i32, align 4 // CHECK10-NEXT: store i32 0, i32* [[RETVAL]], align 4 // CHECK10-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 // CHECK10-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 8 @@ -4735,184 +4709,164 @@ // CHECK10-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK10-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i64* // CHECK10-NEXT: store i64 [[TMP4]], i64* [[TMP9]], align 8 -// CHECK10-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK10-NEXT: store i64 4, i64* [[TMP10]], align 8 -// CHECK10-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK10-NEXT: store i8* null, i8** [[TMP11]], align 8 -// CHECK10-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK10-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64* -// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP13]], align 8 -// CHECK10-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK10-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64* -// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP15]], align 8 -// CHECK10-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK10-NEXT: store i64 8, i64* [[TMP16]], align 8 -// CHECK10-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK10-NEXT: store i8* null, i8** [[TMP17]], align 8 -// CHECK10-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK10-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK10-NEXT: store i8* null, i8** [[TMP10]], align 8 +// CHECK10-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK10-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i64* +// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP12]], align 8 +// CHECK10-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK10-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i64* +// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP14]], align 8 +// CHECK10-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK10-NEXT: store i8* null, 
i8** [[TMP15]], align 8 +// CHECK10-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK10-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** +// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 8 +// CHECK10-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK10-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** // CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 8 -// CHECK10-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK10-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** -// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 8 -// CHECK10-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK10-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 8 -// CHECK10-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK10-NEXT: store i8* null, i8** [[TMP23]], align 8 -// CHECK10-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4 -// CHECK10-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK10-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK10-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 +// CHECK10-NEXT: store i64 [[TMP5]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 2), align 8 +// CHECK10-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK10-NEXT: store i8* null, i8** [[TMP20]], align 8 +// CHECK10-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP23:%.*]] = load i32, i32* [[N]], align 4 +// CHECK10-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK10-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK10-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK10-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK10-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK10-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK10-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK10-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1 -// CHECK10-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 -// CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP30]]) -// CHECK10-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l108.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK10-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 -// CHECK10-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK10-NEXT: 
[[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK10-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], 1 +// CHECK10-NEXT: [[TMP26:%.*]] = zext i32 [[ADD]] to i64 +// CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP26]]) +// CHECK10-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l108.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK10-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK10-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK10: omp_offload.failed: // CHECK10-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l108(i64 [[TMP4]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK10-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK10: omp_offload.cont: -// CHECK10-NEXT: [[TMP33:%.*]] = load i32, i32* [[N]], align 4 +// CHECK10-NEXT: [[TMP29:%.*]] = load i32, i32* [[N]], align 4 // CHECK10-NEXT: [[CONV4:%.*]] = bitcast i64* [[N_CASTED3]] to i32* -// CHECK10-NEXT: store i32 [[TMP33]], i32* [[CONV4]], align 4 -// CHECK10-NEXT: [[TMP34:%.*]] = load i64, i64* [[N_CASTED3]], align 8 -// CHECK10-NEXT: [[TMP35:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK10-NEXT: [[TMP36:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i64* -// CHECK10-NEXT: store i64 [[TMP34]], i64* [[TMP37]], align 8 -// CHECK10-NEXT: [[TMP38:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i64* -// CHECK10-NEXT: store i64 [[TMP34]], i64* [[TMP39]], align 8 -// CHECK10-NEXT: [[TMP40:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 0 -// CHECK10-NEXT: store i64 4, i64* [[TMP40]], align 8 -// CHECK10-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 0 +// CHECK10-NEXT: store i32 [[TMP29]], i32* [[CONV4]], align 4 +// CHECK10-NEXT: [[TMP30:%.*]] = load i64, i64* [[N_CASTED3]], align 8 +// CHECK10-NEXT: [[TMP31:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK10-NEXT: [[TMP32:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i64* +// CHECK10-NEXT: store i64 [[TMP30]], i64* [[TMP33]], align 8 +// CHECK10-NEXT: [[TMP34:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i64* +// CHECK10-NEXT: store i64 [[TMP30]], i64* [[TMP35]], align 8 +// CHECK10-NEXT: [[TMP36:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 0 +// CHECK10-NEXT: store i8* null, i8** [[TMP36]], align 8 +// CHECK10-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1 +// CHECK10-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i64* +// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP38]], align 8 +// CHECK10-NEXT: [[TMP39:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 1 +// CHECK10-NEXT: [[TMP40:%.*]] = bitcast i8** 
[[TMP39]] to i64* +// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP40]], align 8 +// CHECK10-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 1 // CHECK10-NEXT: store i8* null, i8** [[TMP41]], align 8 -// CHECK10-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1 -// CHECK10-NEXT: [[TMP43:%.*]] = bitcast i8** [[TMP42]] to i64* -// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP43]], align 8 -// CHECK10-NEXT: [[TMP44:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 1 -// CHECK10-NEXT: [[TMP45:%.*]] = bitcast i8** [[TMP44]] to i64* -// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP45]], align 8 -// CHECK10-NEXT: [[TMP46:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 1 -// CHECK10-NEXT: store i64 8, i64* [[TMP46]], align 8 -// CHECK10-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 1 -// CHECK10-NEXT: store i8* null, i8** [[TMP47]], align 8 -// CHECK10-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 2 -// CHECK10-NEXT: [[TMP49:%.*]] = bitcast i8** [[TMP48]] to i32** -// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP49]], align 8 -// CHECK10-NEXT: [[TMP50:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 2 -// CHECK10-NEXT: [[TMP51:%.*]] = bitcast i8** [[TMP50]] to i32** -// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP51]], align 8 -// CHECK10-NEXT: [[TMP52:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 2 -// CHECK10-NEXT: store i64 [[TMP35]], i64* [[TMP52]], align 8 -// CHECK10-NEXT: [[TMP53:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 2 -// CHECK10-NEXT: store i8* null, i8** [[TMP53]], align 8 -// CHECK10-NEXT: [[TMP54:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP55:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP56:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 2 +// CHECK10-NEXT: [[TMP43:%.*]] = bitcast i8** [[TMP42]] to i32** +// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP43]], align 8 +// CHECK10-NEXT: [[TMP44:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 2 +// CHECK10-NEXT: [[TMP45:%.*]] = bitcast i8** [[TMP44]] to i32** +// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP45]], align 8 +// CHECK10-NEXT: store i64 [[TMP31]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 2), align 8 +// CHECK10-NEXT: [[TMP46:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 2 +// CHECK10-NEXT: store i8* null, i8** [[TMP46]], align 8 +// CHECK10-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP49:%.*]] = load i32, i32* [[N]], align 4 +// CHECK10-NEXT: store i32 [[TMP49]], i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK10-NEXT: [[TMP50:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK10-NEXT: [[SUB11:%.*]] 
= sub nsw i32 [[TMP50]], 0 +// CHECK10-NEXT: [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1 +// CHECK10-NEXT: [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1 +// CHECK10-NEXT: store i32 [[SUB13]], i32* [[DOTCAPTURE_EXPR_10]], align 4 +// CHECK10-NEXT: [[TMP51:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 +// CHECK10-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP51]], 1 +// CHECK10-NEXT: [[TMP52:%.*]] = zext i32 [[ADD14]] to i64 +// CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP52]]) +// CHECK10-NEXT: [[TMP53:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l113.region_id, i32 3, i8** [[TMP47]], i8** [[TMP48]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK10-NEXT: [[TMP54:%.*]] = icmp ne i32 [[TMP53]], 0 +// CHECK10-NEXT: br i1 [[TMP54]], label [[OMP_OFFLOAD_FAILED15:%.*]], label [[OMP_OFFLOAD_CONT16:%.*]] +// CHECK10: omp_offload.failed15: +// CHECK10-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l113(i64 [[TMP30]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] +// CHECK10-NEXT: br label [[OMP_OFFLOAD_CONT16]] +// CHECK10: omp_offload.cont16: +// CHECK10-NEXT: [[TMP55:%.*]] = load i32, i32* [[M]], align 4 +// CHECK10-NEXT: [[CONV17:%.*]] = bitcast i64* [[M_CASTED]] to i32* +// CHECK10-NEXT: store i32 [[TMP55]], i32* [[CONV17]], align 4 +// CHECK10-NEXT: [[TMP56:%.*]] = load i64, i64* [[M_CASTED]], align 8 // CHECK10-NEXT: [[TMP57:%.*]] = load i32, i32* [[N]], align 4 -// CHECK10-NEXT: store i32 [[TMP57]], i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK10-NEXT: [[TMP58:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK10-NEXT: [[SUB12:%.*]] = sub nsw i32 [[TMP58]], 0 -// CHECK10-NEXT: [[DIV13:%.*]] = sdiv i32 [[SUB12]], 1 -// CHECK10-NEXT: [[SUB14:%.*]] = sub nsw i32 [[DIV13]], 1 -// CHECK10-NEXT: store i32 [[SUB14]], i32* [[DOTCAPTURE_EXPR_11]], align 4 -// CHECK10-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_11]], align 4 -// CHECK10-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP59]], 1 -// CHECK10-NEXT: [[TMP60:%.*]] = zext i32 [[ADD15]] to i64 -// CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP60]]) -// CHECK10-NEXT: [[TMP61:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l113.region_id, i32 3, i8** [[TMP54]], i8** [[TMP55]], i64* [[TMP56]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK10-NEXT: [[TMP62:%.*]] = icmp ne i32 [[TMP61]], 0 -// CHECK10-NEXT: br i1 [[TMP62]], label [[OMP_OFFLOAD_FAILED16:%.*]], label [[OMP_OFFLOAD_CONT17:%.*]] -// CHECK10: omp_offload.failed16: -// CHECK10-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l113(i64 [[TMP34]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] -// CHECK10-NEXT: br label [[OMP_OFFLOAD_CONT17]] -// CHECK10: omp_offload.cont17: -// CHECK10-NEXT: [[TMP63:%.*]] = load i32, i32* [[M]], align 4 -// CHECK10-NEXT: [[CONV18:%.*]] = bitcast i64* [[M_CASTED]] to i32* -// CHECK10-NEXT: store i32 [[TMP63]], i32* [[CONV18]], align 4 -// CHECK10-NEXT: [[TMP64:%.*]] = load i64, i64* [[M_CASTED]], align 8 -// CHECK10-NEXT: [[TMP65:%.*]] = load i32, i32* [[N]], align 4 -// CHECK10-NEXT: 
[[CONV20:%.*]] = bitcast i64* [[N_CASTED19]] to i32* -// CHECK10-NEXT: store i32 [[TMP65]], i32* [[CONV20]], align 4 -// CHECK10-NEXT: [[TMP66:%.*]] = load i64, i64* [[N_CASTED19]], align 8 -// CHECK10-NEXT: [[TMP67:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK10-NEXT: [[TMP68:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP69:%.*]] = bitcast i8** [[TMP68]] to i64* -// CHECK10-NEXT: store i64 [[TMP64]], i64* [[TMP69]], align 8 -// CHECK10-NEXT: [[TMP70:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0 +// CHECK10-NEXT: [[CONV19:%.*]] = bitcast i64* [[N_CASTED18]] to i32* +// CHECK10-NEXT: store i32 [[TMP57]], i32* [[CONV19]], align 4 +// CHECK10-NEXT: [[TMP58:%.*]] = load i64, i64* [[N_CASTED18]], align 8 +// CHECK10-NEXT: [[TMP59:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK10-NEXT: [[TMP60:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP61:%.*]] = bitcast i8** [[TMP60]] to i64* +// CHECK10-NEXT: store i64 [[TMP56]], i64* [[TMP61]], align 8 +// CHECK10-NEXT: [[TMP62:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP63:%.*]] = bitcast i8** [[TMP62]] to i64* +// CHECK10-NEXT: store i64 [[TMP56]], i64* [[TMP63]], align 8 +// CHECK10-NEXT: [[TMP64:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 0 +// CHECK10-NEXT: store i8* null, i8** [[TMP64]], align 8 +// CHECK10-NEXT: [[TMP65:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 1 +// CHECK10-NEXT: [[TMP66:%.*]] = bitcast i8** [[TMP65]] to i64* +// CHECK10-NEXT: store i64 [[TMP58]], i64* [[TMP66]], align 8 +// CHECK10-NEXT: [[TMP67:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 1 +// CHECK10-NEXT: [[TMP68:%.*]] = bitcast i8** [[TMP67]] to i64* +// CHECK10-NEXT: store i64 [[TMP58]], i64* [[TMP68]], align 8 +// CHECK10-NEXT: [[TMP69:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 1 +// CHECK10-NEXT: store i8* null, i8** [[TMP69]], align 8 +// CHECK10-NEXT: [[TMP70:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 2 // CHECK10-NEXT: [[TMP71:%.*]] = bitcast i8** [[TMP70]] to i64* -// CHECK10-NEXT: store i64 [[TMP64]], i64* [[TMP71]], align 8 -// CHECK10-NEXT: [[TMP72:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES24]], i32 0, i32 0 -// CHECK10-NEXT: store i64 4, i64* [[TMP72]], align 8 -// CHECK10-NEXT: [[TMP73:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 0 -// CHECK10-NEXT: store i8* null, i8** [[TMP73]], align 8 -// CHECK10-NEXT: [[TMP74:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 1 -// CHECK10-NEXT: [[TMP75:%.*]] = bitcast i8** [[TMP74]] to i64* -// CHECK10-NEXT: store i64 [[TMP66]], i64* [[TMP75]], align 8 -// CHECK10-NEXT: [[TMP76:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 1 -// CHECK10-NEXT: [[TMP77:%.*]] = bitcast i8** [[TMP76]] to i64* -// CHECK10-NEXT: store i64 [[TMP66]], i64* [[TMP77]], align 8 -// CHECK10-NEXT: [[TMP78:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES24]], i32 0, i32 1 -// CHECK10-NEXT: store i64 4, i64* [[TMP78]], align 8 -// CHECK10-NEXT: [[TMP79:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* 
[[DOTOFFLOAD_MAPPERS23]], i64 0, i64 1 +// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP71]], align 8 +// CHECK10-NEXT: [[TMP72:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 2 +// CHECK10-NEXT: [[TMP73:%.*]] = bitcast i8** [[TMP72]] to i64* +// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP73]], align 8 +// CHECK10-NEXT: [[TMP74:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 2 +// CHECK10-NEXT: store i8* null, i8** [[TMP74]], align 8 +// CHECK10-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 3 +// CHECK10-NEXT: [[TMP76:%.*]] = bitcast i8** [[TMP75]] to i32** +// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP76]], align 8 +// CHECK10-NEXT: [[TMP77:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 3 +// CHECK10-NEXT: [[TMP78:%.*]] = bitcast i8** [[TMP77]] to i32** +// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP78]], align 8 +// CHECK10-NEXT: store i64 [[TMP59]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 3), align 8 +// CHECK10-NEXT: [[TMP79:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 3 // CHECK10-NEXT: store i8* null, i8** [[TMP79]], align 8 -// CHECK10-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 2 -// CHECK10-NEXT: [[TMP81:%.*]] = bitcast i8** [[TMP80]] to i64* -// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP81]], align 8 -// CHECK10-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 2 -// CHECK10-NEXT: [[TMP83:%.*]] = bitcast i8** [[TMP82]] to i64* -// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP83]], align 8 -// CHECK10-NEXT: [[TMP84:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES24]], i32 0, i32 2 -// CHECK10-NEXT: store i64 8, i64* [[TMP84]], align 8 -// CHECK10-NEXT: [[TMP85:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 2 -// CHECK10-NEXT: store i8* null, i8** [[TMP85]], align 8 -// CHECK10-NEXT: [[TMP86:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 3 -// CHECK10-NEXT: [[TMP87:%.*]] = bitcast i8** [[TMP86]] to i32** -// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP87]], align 8 -// CHECK10-NEXT: [[TMP88:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 3 -// CHECK10-NEXT: [[TMP89:%.*]] = bitcast i8** [[TMP88]] to i32** -// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP89]], align 8 -// CHECK10-NEXT: [[TMP90:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES24]], i32 0, i32 3 -// CHECK10-NEXT: store i64 [[TMP67]], i64* [[TMP90]], align 8 -// CHECK10-NEXT: [[TMP91:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 3 -// CHECK10-NEXT: store i8* null, i8** [[TMP91]], align 8 -// CHECK10-NEXT: [[TMP92:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP93:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP94:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES24]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP95:%.*]] = load i32, i32* [[N]], align 4 -// CHECK10-NEXT: store i32 [[TMP95]], i32* [[DOTCAPTURE_EXPR_26]], align 4 -// CHECK10-NEXT: [[TMP96:%.*]] = load i32, i32* 
[[DOTCAPTURE_EXPR_26]], align 4 -// CHECK10-NEXT: [[SUB28:%.*]] = sub nsw i32 [[TMP96]], 0 -// CHECK10-NEXT: [[DIV29:%.*]] = sdiv i32 [[SUB28]], 1 -// CHECK10-NEXT: [[SUB30:%.*]] = sub nsw i32 [[DIV29]], 1 -// CHECK10-NEXT: store i32 [[SUB30]], i32* [[DOTCAPTURE_EXPR_27]], align 4 -// CHECK10-NEXT: [[TMP97:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_27]], align 4 -// CHECK10-NEXT: [[ADD31:%.*]] = add nsw i32 [[TMP97]], 1 -// CHECK10-NEXT: [[TMP98:%.*]] = zext i32 [[ADD31]] to i64 -// CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP98]]) -// CHECK10-NEXT: [[TMP99:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l118.region_id, i32 4, i8** [[TMP92]], i8** [[TMP93]], i64* [[TMP94]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.7, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK10-NEXT: [[TMP100:%.*]] = icmp ne i32 [[TMP99]], 0 -// CHECK10-NEXT: br i1 [[TMP100]], label [[OMP_OFFLOAD_FAILED32:%.*]], label [[OMP_OFFLOAD_CONT33:%.*]] -// CHECK10: omp_offload.failed32: -// CHECK10-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l118(i64 [[TMP64]], i64 [[TMP66]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] -// CHECK10-NEXT: br label [[OMP_OFFLOAD_CONT33]] -// CHECK10: omp_offload.cont33: -// CHECK10-NEXT: [[TMP101:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 -// CHECK10-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP101]]) +// CHECK10-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP82:%.*]] = load i32, i32* [[N]], align 4 +// CHECK10-NEXT: store i32 [[TMP82]], i32* [[DOTCAPTURE_EXPR_24]], align 4 +// CHECK10-NEXT: [[TMP83:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_24]], align 4 +// CHECK10-NEXT: [[SUB26:%.*]] = sub nsw i32 [[TMP83]], 0 +// CHECK10-NEXT: [[DIV27:%.*]] = sdiv i32 [[SUB26]], 1 +// CHECK10-NEXT: [[SUB28:%.*]] = sub nsw i32 [[DIV27]], 1 +// CHECK10-NEXT: store i32 [[SUB28]], i32* [[DOTCAPTURE_EXPR_25]], align 4 +// CHECK10-NEXT: [[TMP84:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_25]], align 4 +// CHECK10-NEXT: [[ADD29:%.*]] = add nsw i32 [[TMP84]], 1 +// CHECK10-NEXT: [[TMP85:%.*]] = zext i32 [[ADD29]] to i64 +// CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP85]]) +// CHECK10-NEXT: [[TMP86:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l118.region_id, i32 4, i8** [[TMP80]], i8** [[TMP81]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK10-NEXT: [[TMP87:%.*]] = icmp ne i32 [[TMP86]], 0 +// CHECK10-NEXT: br i1 [[TMP87]], label [[OMP_OFFLOAD_FAILED30:%.*]], label [[OMP_OFFLOAD_CONT31:%.*]] +// CHECK10: omp_offload.failed30: +// CHECK10-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l118(i64 [[TMP56]], i64 [[TMP58]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] +// CHECK10-NEXT: br label [[OMP_OFFLOAD_CONT31]] +// CHECK10: omp_offload.cont31: +// CHECK10-NEXT: [[TMP88:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 +// CHECK10-NEXT: [[CALL:%.*]] = call 
noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP88]]) // CHECK10-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK10-NEXT: [[TMP102:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK10-NEXT: call void @llvm.stackrestore(i8* [[TMP102]]) -// CHECK10-NEXT: [[TMP103:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK10-NEXT: ret i32 [[TMP103]] +// CHECK10-NEXT: [[TMP89:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK10-NEXT: call void @llvm.stackrestore(i8* [[TMP89]]) +// CHECK10-NEXT: [[TMP90:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK10-NEXT: ret i32 [[TMP90]] // // // CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l108 @@ -5410,11 +5364,11 @@ // CHECK10-NEXT: [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK10-NEXT: store i32 [[TMP3]], i32* [[CONV2]], align 4 // CHECK10-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i64, i32*, i64)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i32* [[CONV1]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP4]]) +// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i64, i32*, i64)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32* [[CONV1]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP4]]) // CHECK10-NEXT: ret void // // -// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..5 +// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..6 // CHECK10-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK10-NEXT: entry: // CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -5496,7 +5450,7 @@ // CHECK10-NEXT: [[CONV7:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK10-NEXT: store i32 [[TMP21]], i32* [[CONV7]], align 4, !llvm.access.group !24 // CHECK10-NEXT: [[TMP22:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !24 -// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64, i32*, i64)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], i64 [[TMP1]], i32* [[TMP2]], i64 [[TMP22]]), !llvm.access.group !24 +// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64, i32*, i64)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], i64 [[TMP1]], i32* [[TMP2]], i64 [[TMP22]]), !llvm.access.group !24 // CHECK10-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK10: omp.inner.for.inc: // CHECK10-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !24 @@ -5550,7 +5504,7 @@ // CHECK10-NEXT: ret void // // -// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..6 +// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..7 // CHECK10-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK10-NEXT: entry: // CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -5702,7 +5656,7 @@ // CHECK10-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK10-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK10-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l86.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK10-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l86.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK10-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK10-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK10: omp_offload.failed: @@ -5720,7 +5674,7 @@ // CHECK10-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0 // CHECK10-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0 // CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK10-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l91.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.13, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.14, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK10-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l91.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* 
getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.16, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK10-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 // CHECK10-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]] // CHECK10: omp_offload.failed5: @@ -5750,7 +5704,7 @@ // CHECK10-NEXT: [[TMP30:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0 // CHECK10-NEXT: [[TMP31:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0 // CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK10-NEXT: [[TMP32:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l96.region_id, i32 2, i8** [[TMP30]], i8** [[TMP31]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.17, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.18, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK10-NEXT: [[TMP32:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l96.region_id, i32 2, i8** [[TMP30]], i8** [[TMP31]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.20, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.21, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK10-NEXT: [[TMP33:%.*]] = icmp ne i32 [[TMP32]], 0 // CHECK10-NEXT: br i1 [[TMP33]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]] // CHECK10: omp_offload.failed11: @@ -5766,11 +5720,11 @@ // CHECK10-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK10-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 // CHECK10-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 -// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..8 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK10-NEXT: ret void // // -// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..8 +// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..10 // CHECK10-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK10-NEXT: entry: // CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -5818,7 +5772,7 @@ // CHECK10-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 // CHECK10-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !30 // CHECK10-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]), !llvm.access.group !30 +// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]), !llvm.access.group !30 // CHECK10-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK10: omp.inner.for.inc: // CHECK10-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !30 @@ -5840,7 +5794,7 @@ // CHECK10-NEXT: ret void // // -// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..9 +// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..11 // CHECK10-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK10-NEXT: entry: // CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -5930,11 +5884,11 @@ // CHECK10-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK10-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 // CHECK10-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 -// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK10-NEXT: ret void // // -// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..14 // CHECK10-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK10-NEXT: entry: // CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -5982,7 +5936,7 @@ // CHECK10-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 // CHECK10-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !36 // CHECK10-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..12 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]), !llvm.access.group !36 +// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]), !llvm.access.group !36 // CHECK10-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK10: omp.inner.for.inc: // CHECK10-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !36 @@ -6004,7 +5958,7 @@ // CHECK10-NEXT: ret void // // -// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..12 +// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..15 // CHECK10-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK10-NEXT: entry: // CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -6105,11 +6059,11 @@ // CHECK10-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK10-NEXT: store i32 [[TMP2]], i32* [[CONV1]], align 4 // CHECK10-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..15 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP3]]) +// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..18 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP3]]) // CHECK10-NEXT: ret void // // -// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..15 +// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..18 // CHECK10-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK10-NEXT: entry: // CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -6165,7 +6119,7 @@ // CHECK10-NEXT: [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK10-NEXT: store i32 [[TMP12]], i32* [[CONV2]], align 4, !llvm.access.group !42 // CHECK10-NEXT: [[TMP13:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !42 -// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]]), !llvm.access.group !42 +// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..19 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]]), !llvm.access.group !42 // CHECK10-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK10: omp.inner.for.inc: // CHECK10-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !42 @@ -6208,7 +6162,7 @@ // CHECK10-NEXT: ret void // // -// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..16 +// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..19 // CHECK10-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK10-NEXT: entry: // CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -6316,7 +6270,6 @@ // CHECK11-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK11-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK11-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4 -// CHECK11-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 4 // CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -6324,19 +6277,17 @@ // CHECK11-NEXT: [[DOTOFFLOAD_BASEPTRS4:%.*]] = alloca [3 x i8*], align 4 // CHECK11-NEXT: [[DOTOFFLOAD_PTRS5:%.*]] = alloca [3 x i8*], align 4 // CHECK11-NEXT: [[DOTOFFLOAD_MAPPERS6:%.*]] = alloca [3 x i8*], align 4 -// CHECK11-NEXT: [[DOTOFFLOAD_SIZES7:%.*]] = alloca [3 x i64], align 4 -// CHECK11-NEXT: [[_TMP8:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[_TMP7:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTCAPTURE_EXPR_8:%.*]] = alloca i32, align 4 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4 // CHECK11-NEXT: [[M_CASTED:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[N_CASTED17:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOFFLOAD_BASEPTRS18:%.*]] = alloca [4 x i8*], align 4 -// CHECK11-NEXT: [[DOTOFFLOAD_PTRS19:%.*]] = alloca [4 x i8*], align 4 -// CHECK11-NEXT: [[DOTOFFLOAD_MAPPERS20:%.*]] = alloca [4 x i8*], align 4 -// CHECK11-NEXT: [[DOTOFFLOAD_SIZES21:%.*]] = alloca [4 x i64], align 4 -// CHECK11-NEXT: [[_TMP22:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTCAPTURE_EXPR_23:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTCAPTURE_EXPR_24:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[N_CASTED16:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTOFFLOAD_BASEPTRS17:%.*]] = alloca [4 x i8*], align 4 +// CHECK11-NEXT: [[DOTOFFLOAD_PTRS18:%.*]] = alloca [4 x i8*], align 4 +// CHECK11-NEXT: [[DOTOFFLOAD_MAPPERS19:%.*]] = alloca [4 x i8*], align 4 +// CHECK11-NEXT: [[_TMP20:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTCAPTURE_EXPR_21:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTCAPTURE_EXPR_22:%.*]] = alloca i32, align 4 // CHECK11-NEXT: store i32 0, i32* [[RETVAL]], align 4 // CHECK11-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 // CHECK11-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 4 @@ -6358,183 +6309,163 @@ // CHECK11-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK11-NEXT: [[TMP9:%.*]] = bitcast i8** 
[[TMP8]] to i32* // CHECK11-NEXT: store i32 [[TMP3]], i32* [[TMP9]], align 4 -// CHECK11-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK11-NEXT: store i64 4, i64* [[TMP10]], align 4 -// CHECK11-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK11-NEXT: store i8* null, i8** [[TMP11]], align 4 -// CHECK11-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK11-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32* -// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP13]], align 4 -// CHECK11-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK11-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32* -// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP15]], align 4 -// CHECK11-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK11-NEXT: store i64 4, i64* [[TMP16]], align 4 -// CHECK11-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK11-NEXT: store i8* null, i8** [[TMP17]], align 4 -// CHECK11-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK11-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK11-NEXT: store i8* null, i8** [[TMP10]], align 4 +// CHECK11-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK11-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i32* +// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP12]], align 4 +// CHECK11-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK11-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i32* +// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP14]], align 4 +// CHECK11-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK11-NEXT: store i8* null, i8** [[TMP15]], align 4 +// CHECK11-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK11-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** +// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 4 +// CHECK11-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK11-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** // CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 4 -// CHECK11-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK11-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** -// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 4 -// CHECK11-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK11-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 4 -// CHECK11-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK11-NEXT: store i8* null, i8** [[TMP23]], align 4 -// CHECK11-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP26:%.*]] = getelementptr 
inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4 -// CHECK11-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK11-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 +// CHECK11-NEXT: store i64 [[TMP5]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 2), align 4 +// CHECK11-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK11-NEXT: store i8* null, i8** [[TMP20]], align 4 +// CHECK11-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP23:%.*]] = load i32, i32* [[N]], align 4 +// CHECK11-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK11-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK11-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK11-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK11-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK11-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1 -// CHECK11-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 -// CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP30]]) -// CHECK11-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l108.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK11-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 -// CHECK11-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK11-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], 1 +// CHECK11-NEXT: [[TMP26:%.*]] = zext i32 [[ADD]] to i64 +// CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP26]]) +// CHECK11-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l108.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK11-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK11-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK11: omp_offload.failed: // CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l108(i32 [[TMP3]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK11: omp_offload.cont: -// CHECK11-NEXT: [[TMP33:%.*]] = load i32, i32* [[N]], align 4 -// CHECK11-NEXT: store i32 [[TMP33]], i32* [[N_CASTED3]], align 4 -// CHECK11-NEXT: [[TMP34:%.*]] = load i32, i32* [[N_CASTED3]], align 4 -// CHECK11-NEXT: [[TMP35:%.*]] = mul nuw i32 
[[TMP0]], 4 -// CHECK11-NEXT: [[TMP36:%.*]] = sext i32 [[TMP35]] to i64 -// CHECK11-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i32* -// CHECK11-NEXT: store i32 [[TMP34]], i32* [[TMP38]], align 4 -// CHECK11-NEXT: [[TMP39:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i32* -// CHECK11-NEXT: store i32 [[TMP34]], i32* [[TMP40]], align 4 -// CHECK11-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 0 -// CHECK11-NEXT: store i64 4, i64* [[TMP41]], align 4 -// CHECK11-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP29:%.*]] = load i32, i32* [[N]], align 4 +// CHECK11-NEXT: store i32 [[TMP29]], i32* [[N_CASTED3]], align 4 +// CHECK11-NEXT: [[TMP30:%.*]] = load i32, i32* [[N_CASTED3]], align 4 +// CHECK11-NEXT: [[TMP31:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK11-NEXT: [[TMP32:%.*]] = sext i32 [[TMP31]] to i64 +// CHECK11-NEXT: [[TMP33:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i32* +// CHECK11-NEXT: store i32 [[TMP30]], i32* [[TMP34]], align 4 +// CHECK11-NEXT: [[TMP35:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP36:%.*]] = bitcast i8** [[TMP35]] to i32* +// CHECK11-NEXT: store i32 [[TMP30]], i32* [[TMP36]], align 4 +// CHECK11-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0 +// CHECK11-NEXT: store i8* null, i8** [[TMP37]], align 4 +// CHECK11-NEXT: [[TMP38:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1 +// CHECK11-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i32* +// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP39]], align 4 +// CHECK11-NEXT: [[TMP40:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 1 +// CHECK11-NEXT: [[TMP41:%.*]] = bitcast i8** [[TMP40]] to i32* +// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP41]], align 4 +// CHECK11-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1 // CHECK11-NEXT: store i8* null, i8** [[TMP42]], align 4 -// CHECK11-NEXT: [[TMP43:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1 -// CHECK11-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32* -// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP44]], align 4 -// CHECK11-NEXT: [[TMP45:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 1 -// CHECK11-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32* -// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP46]], align 4 -// CHECK11-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 1 -// CHECK11-NEXT: store i64 4, i64* [[TMP47]], align 4 -// CHECK11-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1 -// CHECK11-NEXT: store i8* null, i8** [[TMP48]], align 4 -// CHECK11-NEXT: [[TMP49:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2 -// CHECK11-NEXT: [[TMP50:%.*]] = bitcast i8** [[TMP49]] to i32** -// CHECK11-NEXT: store i32* [[VLA]], i32** 
[[TMP50]], align 4 -// CHECK11-NEXT: [[TMP51:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 2 -// CHECK11-NEXT: [[TMP52:%.*]] = bitcast i8** [[TMP51]] to i32** -// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP52]], align 4 -// CHECK11-NEXT: [[TMP53:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 2 -// CHECK11-NEXT: store i64 [[TMP36]], i64* [[TMP53]], align 4 -// CHECK11-NEXT: [[TMP54:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 2 -// CHECK11-NEXT: store i8* null, i8** [[TMP54]], align 4 -// CHECK11-NEXT: [[TMP55:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP56:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP57:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP43:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2 +// CHECK11-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32** +// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP44]], align 4 +// CHECK11-NEXT: [[TMP45:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 2 +// CHECK11-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32** +// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP46]], align 4 +// CHECK11-NEXT: store i64 [[TMP32]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 2), align 4 +// CHECK11-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 2 +// CHECK11-NEXT: store i8* null, i8** [[TMP47]], align 4 +// CHECK11-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP49:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP50:%.*]] = load i32, i32* [[N]], align 4 +// CHECK11-NEXT: store i32 [[TMP50]], i32* [[DOTCAPTURE_EXPR_8]], align 4 +// CHECK11-NEXT: [[TMP51:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_8]], align 4 +// CHECK11-NEXT: [[SUB10:%.*]] = sub nsw i32 [[TMP51]], 0 +// CHECK11-NEXT: [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1 +// CHECK11-NEXT: [[SUB12:%.*]] = sub nsw i32 [[DIV11]], 1 +// CHECK11-NEXT: store i32 [[SUB12]], i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK11-NEXT: [[TMP52:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK11-NEXT: [[ADD13:%.*]] = add nsw i32 [[TMP52]], 1 +// CHECK11-NEXT: [[TMP53:%.*]] = zext i32 [[ADD13]] to i64 +// CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP53]]) +// CHECK11-NEXT: [[TMP54:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l113.region_id, i32 3, i8** [[TMP48]], i8** [[TMP49]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK11-NEXT: [[TMP55:%.*]] = icmp ne i32 [[TMP54]], 0 +// CHECK11-NEXT: br i1 [[TMP55]], label [[OMP_OFFLOAD_FAILED14:%.*]], label [[OMP_OFFLOAD_CONT15:%.*]] +// CHECK11: omp_offload.failed14: +// CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l113(i32 [[TMP30]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] +// CHECK11-NEXT: br 
label [[OMP_OFFLOAD_CONT15]] +// CHECK11: omp_offload.cont15: +// CHECK11-NEXT: [[TMP56:%.*]] = load i32, i32* [[M]], align 4 +// CHECK11-NEXT: store i32 [[TMP56]], i32* [[M_CASTED]], align 4 +// CHECK11-NEXT: [[TMP57:%.*]] = load i32, i32* [[M_CASTED]], align 4 // CHECK11-NEXT: [[TMP58:%.*]] = load i32, i32* [[N]], align 4 -// CHECK11-NEXT: store i32 [[TMP58]], i32* [[DOTCAPTURE_EXPR_9]], align 4 -// CHECK11-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 -// CHECK11-NEXT: [[SUB11:%.*]] = sub nsw i32 [[TMP59]], 0 -// CHECK11-NEXT: [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1 -// CHECK11-NEXT: [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1 -// CHECK11-NEXT: store i32 [[SUB13]], i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK11-NEXT: [[TMP60:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK11-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP60]], 1 -// CHECK11-NEXT: [[TMP61:%.*]] = zext i32 [[ADD14]] to i64 -// CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP61]]) -// CHECK11-NEXT: [[TMP62:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l113.region_id, i32 3, i8** [[TMP55]], i8** [[TMP56]], i64* [[TMP57]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK11-NEXT: [[TMP63:%.*]] = icmp ne i32 [[TMP62]], 0 -// CHECK11-NEXT: br i1 [[TMP63]], label [[OMP_OFFLOAD_FAILED15:%.*]], label [[OMP_OFFLOAD_CONT16:%.*]] -// CHECK11: omp_offload.failed15: -// CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l113(i32 [[TMP34]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] -// CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT16]] -// CHECK11: omp_offload.cont16: -// CHECK11-NEXT: [[TMP64:%.*]] = load i32, i32* [[M]], align 4 -// CHECK11-NEXT: store i32 [[TMP64]], i32* [[M_CASTED]], align 4 -// CHECK11-NEXT: [[TMP65:%.*]] = load i32, i32* [[M_CASTED]], align 4 -// CHECK11-NEXT: [[TMP66:%.*]] = load i32, i32* [[N]], align 4 -// CHECK11-NEXT: store i32 [[TMP66]], i32* [[N_CASTED17]], align 4 -// CHECK11-NEXT: [[TMP67:%.*]] = load i32, i32* [[N_CASTED17]], align 4 -// CHECK11-NEXT: [[TMP68:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK11-NEXT: [[TMP69:%.*]] = sext i32 [[TMP68]] to i64 -// CHECK11-NEXT: [[TMP70:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP71:%.*]] = bitcast i8** [[TMP70]] to i32* -// CHECK11-NEXT: store i32 [[TMP65]], i32* [[TMP71]], align 4 -// CHECK11-NEXT: [[TMP72:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 0 +// CHECK11-NEXT: store i32 [[TMP58]], i32* [[N_CASTED16]], align 4 +// CHECK11-NEXT: [[TMP59:%.*]] = load i32, i32* [[N_CASTED16]], align 4 +// CHECK11-NEXT: [[TMP60:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK11-NEXT: [[TMP61:%.*]] = sext i32 [[TMP60]] to i64 +// CHECK11-NEXT: [[TMP62:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP63:%.*]] = bitcast i8** [[TMP62]] to i32* +// CHECK11-NEXT: store i32 [[TMP57]], i32* [[TMP63]], align 4 +// CHECK11-NEXT: [[TMP64:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP65:%.*]] = bitcast i8** [[TMP64]] to i32* +// CHECK11-NEXT: store i32 [[TMP57]], i32* [[TMP65]], align 4 +// CHECK11-NEXT: [[TMP66:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* 
[[DOTOFFLOAD_MAPPERS19]], i32 0, i32 0 +// CHECK11-NEXT: store i8* null, i8** [[TMP66]], align 4 +// CHECK11-NEXT: [[TMP67:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 1 +// CHECK11-NEXT: [[TMP68:%.*]] = bitcast i8** [[TMP67]] to i32* +// CHECK11-NEXT: store i32 [[TMP59]], i32* [[TMP68]], align 4 +// CHECK11-NEXT: [[TMP69:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 1 +// CHECK11-NEXT: [[TMP70:%.*]] = bitcast i8** [[TMP69]] to i32* +// CHECK11-NEXT: store i32 [[TMP59]], i32* [[TMP70]], align 4 +// CHECK11-NEXT: [[TMP71:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 1 +// CHECK11-NEXT: store i8* null, i8** [[TMP71]], align 4 +// CHECK11-NEXT: [[TMP72:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 2 // CHECK11-NEXT: [[TMP73:%.*]] = bitcast i8** [[TMP72]] to i32* -// CHECK11-NEXT: store i32 [[TMP65]], i32* [[TMP73]], align 4 -// CHECK11-NEXT: [[TMP74:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES21]], i32 0, i32 0 -// CHECK11-NEXT: store i64 4, i64* [[TMP74]], align 4 -// CHECK11-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 0 -// CHECK11-NEXT: store i8* null, i8** [[TMP75]], align 4 -// CHECK11-NEXT: [[TMP76:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 1 -// CHECK11-NEXT: [[TMP77:%.*]] = bitcast i8** [[TMP76]] to i32* -// CHECK11-NEXT: store i32 [[TMP67]], i32* [[TMP77]], align 4 -// CHECK11-NEXT: [[TMP78:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 1 -// CHECK11-NEXT: [[TMP79:%.*]] = bitcast i8** [[TMP78]] to i32* -// CHECK11-NEXT: store i32 [[TMP67]], i32* [[TMP79]], align 4 -// CHECK11-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES21]], i32 0, i32 1 -// CHECK11-NEXT: store i64 4, i64* [[TMP80]], align 4 -// CHECK11-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 1 +// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP73]], align 4 +// CHECK11-NEXT: [[TMP74:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 2 +// CHECK11-NEXT: [[TMP75:%.*]] = bitcast i8** [[TMP74]] to i32* +// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP75]], align 4 +// CHECK11-NEXT: [[TMP76:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 2 +// CHECK11-NEXT: store i8* null, i8** [[TMP76]], align 4 +// CHECK11-NEXT: [[TMP77:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 3 +// CHECK11-NEXT: [[TMP78:%.*]] = bitcast i8** [[TMP77]] to i32** +// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP78]], align 4 +// CHECK11-NEXT: [[TMP79:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 3 +// CHECK11-NEXT: [[TMP80:%.*]] = bitcast i8** [[TMP79]] to i32** +// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP80]], align 4 +// CHECK11-NEXT: store i64 [[TMP61]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 3), align 4 +// CHECK11-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 3 // CHECK11-NEXT: store i8* null, i8** [[TMP81]], align 4 -// CHECK11-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 2 -// CHECK11-NEXT: 
[[TMP83:%.*]] = bitcast i8** [[TMP82]] to i32* -// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP83]], align 4 -// CHECK11-NEXT: [[TMP84:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 2 -// CHECK11-NEXT: [[TMP85:%.*]] = bitcast i8** [[TMP84]] to i32* -// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP85]], align 4 -// CHECK11-NEXT: [[TMP86:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES21]], i32 0, i32 2 -// CHECK11-NEXT: store i64 4, i64* [[TMP86]], align 4 -// CHECK11-NEXT: [[TMP87:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 2 -// CHECK11-NEXT: store i8* null, i8** [[TMP87]], align 4 -// CHECK11-NEXT: [[TMP88:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 3 -// CHECK11-NEXT: [[TMP89:%.*]] = bitcast i8** [[TMP88]] to i32** -// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP89]], align 4 -// CHECK11-NEXT: [[TMP90:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 3 -// CHECK11-NEXT: [[TMP91:%.*]] = bitcast i8** [[TMP90]] to i32** -// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP91]], align 4 -// CHECK11-NEXT: [[TMP92:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES21]], i32 0, i32 3 -// CHECK11-NEXT: store i64 [[TMP69]], i64* [[TMP92]], align 4 -// CHECK11-NEXT: [[TMP93:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 3 -// CHECK11-NEXT: store i8* null, i8** [[TMP93]], align 4 -// CHECK11-NEXT: [[TMP94:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP95:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP96:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES21]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP97:%.*]] = load i32, i32* [[N]], align 4 -// CHECK11-NEXT: store i32 [[TMP97]], i32* [[DOTCAPTURE_EXPR_23]], align 4 -// CHECK11-NEXT: [[TMP98:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_23]], align 4 -// CHECK11-NEXT: [[SUB25:%.*]] = sub nsw i32 [[TMP98]], 0 -// CHECK11-NEXT: [[DIV26:%.*]] = sdiv i32 [[SUB25]], 1 -// CHECK11-NEXT: [[SUB27:%.*]] = sub nsw i32 [[DIV26]], 1 -// CHECK11-NEXT: store i32 [[SUB27]], i32* [[DOTCAPTURE_EXPR_24]], align 4 -// CHECK11-NEXT: [[TMP99:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_24]], align 4 -// CHECK11-NEXT: [[ADD28:%.*]] = add nsw i32 [[TMP99]], 1 -// CHECK11-NEXT: [[TMP100:%.*]] = zext i32 [[ADD28]] to i64 -// CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP100]]) -// CHECK11-NEXT: [[TMP101:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l118.region_id, i32 4, i8** [[TMP94]], i8** [[TMP95]], i64* [[TMP96]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.7, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK11-NEXT: [[TMP102:%.*]] = icmp ne i32 [[TMP101]], 0 -// CHECK11-NEXT: br i1 [[TMP102]], label [[OMP_OFFLOAD_FAILED29:%.*]], label [[OMP_OFFLOAD_CONT30:%.*]] -// CHECK11: omp_offload.failed29: -// CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l118(i32 [[TMP65]], i32 [[TMP67]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] -// CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT30]] -// CHECK11: omp_offload.cont30: -// CHECK11-NEXT: [[TMP103:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 -// 
CHECK11-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP103]]) +// CHECK11-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP83:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP84:%.*]] = load i32, i32* [[N]], align 4 +// CHECK11-NEXT: store i32 [[TMP84]], i32* [[DOTCAPTURE_EXPR_21]], align 4 +// CHECK11-NEXT: [[TMP85:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_21]], align 4 +// CHECK11-NEXT: [[SUB23:%.*]] = sub nsw i32 [[TMP85]], 0 +// CHECK11-NEXT: [[DIV24:%.*]] = sdiv i32 [[SUB23]], 1 +// CHECK11-NEXT: [[SUB25:%.*]] = sub nsw i32 [[DIV24]], 1 +// CHECK11-NEXT: store i32 [[SUB25]], i32* [[DOTCAPTURE_EXPR_22]], align 4 +// CHECK11-NEXT: [[TMP86:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_22]], align 4 +// CHECK11-NEXT: [[ADD26:%.*]] = add nsw i32 [[TMP86]], 1 +// CHECK11-NEXT: [[TMP87:%.*]] = zext i32 [[ADD26]] to i64 +// CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP87]]) +// CHECK11-NEXT: [[TMP88:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l118.region_id, i32 4, i8** [[TMP82]], i8** [[TMP83]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK11-NEXT: [[TMP89:%.*]] = icmp ne i32 [[TMP88]], 0 +// CHECK11-NEXT: br i1 [[TMP89]], label [[OMP_OFFLOAD_FAILED27:%.*]], label [[OMP_OFFLOAD_CONT28:%.*]] +// CHECK11: omp_offload.failed27: +// CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l118(i32 [[TMP57]], i32 [[TMP59]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] +// CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT28]] +// CHECK11: omp_offload.cont28: +// CHECK11-NEXT: [[TMP90:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 +// CHECK11-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP90]]) // CHECK11-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK11-NEXT: [[TMP104:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK11-NEXT: call void @llvm.stackrestore(i8* [[TMP104]]) -// CHECK11-NEXT: [[TMP105:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK11-NEXT: ret i32 [[TMP105]] +// CHECK11-NEXT: [[TMP91:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK11-NEXT: call void @llvm.stackrestore(i8* [[TMP91]]) +// CHECK11-NEXT: [[TMP92:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK11-NEXT: ret i32 [[TMP92]] // // // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l108 @@ -7017,11 +6948,11 @@ // CHECK11-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 // CHECK11-NEXT: store i32 [[TMP3]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK11-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32, i32*, i32)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP4]]) +// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32, i32*, i32)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP4]]) // CHECK11-NEXT: ret void // // -// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..5 +// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..6 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK11-NEXT: entry: // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -7099,7 +7030,7 @@ // CHECK11-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4, !llvm.access.group !25 // CHECK11-NEXT: store i32 [[TMP19]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !25 // CHECK11-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !25 -// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32, i32*, i32)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], i32 [[TMP1]], i32* [[TMP2]], i32 [[TMP20]]), !llvm.access.group !25 +// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32, i32*, i32)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], i32 [[TMP1]], i32* [[TMP2]], i32 [[TMP20]]), !llvm.access.group !25 // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK11: omp.inner.for.inc: // CHECK11-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25 @@ -7153,7 +7084,7 @@ // CHECK11-NEXT: ret void // // -// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..6 +// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..7 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK11-NEXT: entry: // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -7301,7 +7232,7 @@ // CHECK11-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK11-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK11-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l86.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK11-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* 
@[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l86.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK11-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK11-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK11: omp_offload.failed: @@ -7319,7 +7250,7 @@ // CHECK11-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0 // CHECK11-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0 // CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK11-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l91.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.13, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.14, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK11-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l91.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.16, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK11-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 // CHECK11-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]] // CHECK11: omp_offload.failed5: @@ -7348,7 +7279,7 @@ // CHECK11-NEXT: [[TMP30:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0 // CHECK11-NEXT: [[TMP31:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0 // CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK11-NEXT: [[TMP32:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l96.region_id, i32 2, i8** [[TMP30]], i8** [[TMP31]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.17, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.18, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK11-NEXT: [[TMP32:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l96.region_id, i32 2, i8** [[TMP30]], i8** [[TMP31]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.20, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.21, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK11-NEXT: [[TMP33:%.*]] = icmp ne i32 [[TMP32]], 0 // CHECK11-NEXT: br i1 [[TMP33]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]] // CHECK11: omp_offload.failed11: @@ -7364,11 +7295,11 @@ // CHECK11-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK11-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], 
align 4 // CHECK11-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 -// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..8 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK11-NEXT: ret void // // -// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..8 +// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..10 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK11-NEXT: entry: // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -7414,7 +7345,7 @@ // CHECK11: omp.inner.for.body: // CHECK11-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !31 // CHECK11-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !31 -// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]), !llvm.access.group !31 +// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]), !llvm.access.group !31 // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK11: omp.inner.for.inc: // CHECK11-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !31 @@ -7436,7 +7367,7 @@ // CHECK11-NEXT: ret void // // -// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..9 +// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..11 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK11-NEXT: entry: // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -7523,11 +7454,11 @@ // CHECK11-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK11-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 // CHECK11-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 -// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK11-NEXT: ret void // // -// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..14 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK11-NEXT: entry: // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -7573,7 +7504,7 @@ // CHECK11: omp.inner.for.body: // CHECK11-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !37 // CHECK11-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !37 -// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..12 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]), !llvm.access.group !37 +// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]), !llvm.access.group !37 // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK11: omp.inner.for.inc: // CHECK11-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !37 @@ -7595,7 +7526,7 @@ // CHECK11-NEXT: ret void // // -// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..12 +// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..15 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK11-NEXT: entry: // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -7691,11 +7622,11 @@ // CHECK11-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 // CHECK11-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK11-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..15 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP3]]) +// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..18 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP3]]) // CHECK11-NEXT: ret void // // -// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..15 +// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..18 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK11-NEXT: entry: // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -7747,7 +7678,7 @@ // CHECK11-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4, !llvm.access.group !43 // CHECK11-NEXT: store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !43 // CHECK11-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !43 -// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]]), !llvm.access.group !43 +// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..19 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]]), !llvm.access.group !43 // CHECK11-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK11: omp.inner.for.inc: // CHECK11-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !43 @@ -7790,7 +7721,7 @@ // CHECK11-NEXT: ret void // // -// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..16 +// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..19 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK11-NEXT: entry: // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -7894,7 +7825,6 @@ // CHECK12-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK12-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK12-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4 -// CHECK12-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 4 // CHECK12-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK12-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK12-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -7902,19 +7832,17 @@ // CHECK12-NEXT: [[DOTOFFLOAD_BASEPTRS4:%.*]] = alloca [3 x i8*], align 4 // CHECK12-NEXT: [[DOTOFFLOAD_PTRS5:%.*]] = alloca [3 x i8*], align 4 // CHECK12-NEXT: [[DOTOFFLOAD_MAPPERS6:%.*]] = alloca [3 x i8*], align 4 -// CHECK12-NEXT: [[DOTOFFLOAD_SIZES7:%.*]] = alloca [3 x i64], align 4 -// CHECK12-NEXT: [[_TMP8:%.*]] = alloca i32, align 4 +// CHECK12-NEXT: [[_TMP7:%.*]] = alloca i32, align 4 +// CHECK12-NEXT: [[DOTCAPTURE_EXPR_8:%.*]] = alloca i32, align 4 // CHECK12-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4 -// CHECK12-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4 // 
CHECK12-NEXT: [[M_CASTED:%.*]] = alloca i32, align 4 -// CHECK12-NEXT: [[N_CASTED17:%.*]] = alloca i32, align 4 -// CHECK12-NEXT: [[DOTOFFLOAD_BASEPTRS18:%.*]] = alloca [4 x i8*], align 4 -// CHECK12-NEXT: [[DOTOFFLOAD_PTRS19:%.*]] = alloca [4 x i8*], align 4 -// CHECK12-NEXT: [[DOTOFFLOAD_MAPPERS20:%.*]] = alloca [4 x i8*], align 4 -// CHECK12-NEXT: [[DOTOFFLOAD_SIZES21:%.*]] = alloca [4 x i64], align 4 -// CHECK12-NEXT: [[_TMP22:%.*]] = alloca i32, align 4 -// CHECK12-NEXT: [[DOTCAPTURE_EXPR_23:%.*]] = alloca i32, align 4 -// CHECK12-NEXT: [[DOTCAPTURE_EXPR_24:%.*]] = alloca i32, align 4 +// CHECK12-NEXT: [[N_CASTED16:%.*]] = alloca i32, align 4 +// CHECK12-NEXT: [[DOTOFFLOAD_BASEPTRS17:%.*]] = alloca [4 x i8*], align 4 +// CHECK12-NEXT: [[DOTOFFLOAD_PTRS18:%.*]] = alloca [4 x i8*], align 4 +// CHECK12-NEXT: [[DOTOFFLOAD_MAPPERS19:%.*]] = alloca [4 x i8*], align 4 +// CHECK12-NEXT: [[_TMP20:%.*]] = alloca i32, align 4 +// CHECK12-NEXT: [[DOTCAPTURE_EXPR_21:%.*]] = alloca i32, align 4 +// CHECK12-NEXT: [[DOTCAPTURE_EXPR_22:%.*]] = alloca i32, align 4 // CHECK12-NEXT: store i32 0, i32* [[RETVAL]], align 4 // CHECK12-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 // CHECK12-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 4 @@ -7936,183 +7864,163 @@ // CHECK12-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK12-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i32* // CHECK12-NEXT: store i32 [[TMP3]], i32* [[TMP9]], align 4 -// CHECK12-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK12-NEXT: store i64 4, i64* [[TMP10]], align 4 -// CHECK12-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK12-NEXT: store i8* null, i8** [[TMP11]], align 4 -// CHECK12-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK12-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32* -// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP13]], align 4 -// CHECK12-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK12-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32* -// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP15]], align 4 -// CHECK12-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK12-NEXT: store i64 4, i64* [[TMP16]], align 4 -// CHECK12-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK12-NEXT: store i8* null, i8** [[TMP17]], align 4 -// CHECK12-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK12-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK12-NEXT: store i8* null, i8** [[TMP10]], align 4 +// CHECK12-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK12-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i32* +// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP12]], align 4 +// CHECK12-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK12-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i32* +// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP14]], align 4 +// CHECK12-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* 
[[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK12-NEXT: store i8* null, i8** [[TMP15]], align 4 +// CHECK12-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK12-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** +// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 4 +// CHECK12-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK12-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** // CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 4 -// CHECK12-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK12-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** -// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 4 -// CHECK12-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK12-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 4 -// CHECK12-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK12-NEXT: store i8* null, i8** [[TMP23]], align 4 -// CHECK12-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4 -// CHECK12-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK12-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK12-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 +// CHECK12-NEXT: store i64 [[TMP5]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 2), align 4 +// CHECK12-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK12-NEXT: store i8* null, i8** [[TMP20]], align 4 +// CHECK12-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP23:%.*]] = load i32, i32* [[N]], align 4 +// CHECK12-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK12-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK12-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK12-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK12-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK12-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK12-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK12-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1 -// CHECK12-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 -// CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP30]]) -// CHECK12-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l108.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK12-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 -// CHECK12-NEXT: br i1 [[TMP32]], label 
[[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK12-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK12-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], 1 +// CHECK12-NEXT: [[TMP26:%.*]] = zext i32 [[ADD]] to i64 +// CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP26]]) +// CHECK12-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l108.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK12-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK12-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK12: omp_offload.failed: // CHECK12-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l108(i32 [[TMP3]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK12-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK12: omp_offload.cont: -// CHECK12-NEXT: [[TMP33:%.*]] = load i32, i32* [[N]], align 4 -// CHECK12-NEXT: store i32 [[TMP33]], i32* [[N_CASTED3]], align 4 -// CHECK12-NEXT: [[TMP34:%.*]] = load i32, i32* [[N_CASTED3]], align 4 -// CHECK12-NEXT: [[TMP35:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK12-NEXT: [[TMP36:%.*]] = sext i32 [[TMP35]] to i64 -// CHECK12-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i32* -// CHECK12-NEXT: store i32 [[TMP34]], i32* [[TMP38]], align 4 -// CHECK12-NEXT: [[TMP39:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i32* -// CHECK12-NEXT: store i32 [[TMP34]], i32* [[TMP40]], align 4 -// CHECK12-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 0 -// CHECK12-NEXT: store i64 4, i64* [[TMP41]], align 4 -// CHECK12-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP29:%.*]] = load i32, i32* [[N]], align 4 +// CHECK12-NEXT: store i32 [[TMP29]], i32* [[N_CASTED3]], align 4 +// CHECK12-NEXT: [[TMP30:%.*]] = load i32, i32* [[N_CASTED3]], align 4 +// CHECK12-NEXT: [[TMP31:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK12-NEXT: [[TMP32:%.*]] = sext i32 [[TMP31]] to i64 +// CHECK12-NEXT: [[TMP33:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i32* +// CHECK12-NEXT: store i32 [[TMP30]], i32* [[TMP34]], align 4 +// CHECK12-NEXT: [[TMP35:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP36:%.*]] = bitcast i8** [[TMP35]] to i32* +// CHECK12-NEXT: store i32 [[TMP30]], i32* [[TMP36]], align 4 +// CHECK12-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0 +// CHECK12-NEXT: store i8* null, i8** [[TMP37]], align 4 +// CHECK12-NEXT: [[TMP38:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1 +// CHECK12-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i32* +// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP39]], align 4 +// CHECK12-NEXT: 
[[TMP40:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 1 +// CHECK12-NEXT: [[TMP41:%.*]] = bitcast i8** [[TMP40]] to i32* +// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP41]], align 4 +// CHECK12-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1 // CHECK12-NEXT: store i8* null, i8** [[TMP42]], align 4 -// CHECK12-NEXT: [[TMP43:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1 -// CHECK12-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32* -// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP44]], align 4 -// CHECK12-NEXT: [[TMP45:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 1 -// CHECK12-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32* -// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP46]], align 4 -// CHECK12-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 1 -// CHECK12-NEXT: store i64 4, i64* [[TMP47]], align 4 -// CHECK12-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1 -// CHECK12-NEXT: store i8* null, i8** [[TMP48]], align 4 -// CHECK12-NEXT: [[TMP49:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2 -// CHECK12-NEXT: [[TMP50:%.*]] = bitcast i8** [[TMP49]] to i32** -// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP50]], align 4 -// CHECK12-NEXT: [[TMP51:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 2 -// CHECK12-NEXT: [[TMP52:%.*]] = bitcast i8** [[TMP51]] to i32** -// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP52]], align 4 -// CHECK12-NEXT: [[TMP53:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 2 -// CHECK12-NEXT: store i64 [[TMP36]], i64* [[TMP53]], align 4 -// CHECK12-NEXT: [[TMP54:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 2 -// CHECK12-NEXT: store i8* null, i8** [[TMP54]], align 4 -// CHECK12-NEXT: [[TMP55:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP56:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP57:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP43:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2 +// CHECK12-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32** +// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP44]], align 4 +// CHECK12-NEXT: [[TMP45:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 2 +// CHECK12-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32** +// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP46]], align 4 +// CHECK12-NEXT: store i64 [[TMP32]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 2), align 4 +// CHECK12-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 2 +// CHECK12-NEXT: store i8* null, i8** [[TMP47]], align 4 +// CHECK12-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP49:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP50:%.*]] = load i32, i32* [[N]], align 4 +// CHECK12-NEXT: store i32 [[TMP50]], i32* 
[[DOTCAPTURE_EXPR_8]], align 4 +// CHECK12-NEXT: [[TMP51:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_8]], align 4 +// CHECK12-NEXT: [[SUB10:%.*]] = sub nsw i32 [[TMP51]], 0 +// CHECK12-NEXT: [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1 +// CHECK12-NEXT: [[SUB12:%.*]] = sub nsw i32 [[DIV11]], 1 +// CHECK12-NEXT: store i32 [[SUB12]], i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK12-NEXT: [[TMP52:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK12-NEXT: [[ADD13:%.*]] = add nsw i32 [[TMP52]], 1 +// CHECK12-NEXT: [[TMP53:%.*]] = zext i32 [[ADD13]] to i64 +// CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP53]]) +// CHECK12-NEXT: [[TMP54:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l113.region_id, i32 3, i8** [[TMP48]], i8** [[TMP49]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK12-NEXT: [[TMP55:%.*]] = icmp ne i32 [[TMP54]], 0 +// CHECK12-NEXT: br i1 [[TMP55]], label [[OMP_OFFLOAD_FAILED14:%.*]], label [[OMP_OFFLOAD_CONT15:%.*]] +// CHECK12: omp_offload.failed14: +// CHECK12-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l113(i32 [[TMP30]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] +// CHECK12-NEXT: br label [[OMP_OFFLOAD_CONT15]] +// CHECK12: omp_offload.cont15: +// CHECK12-NEXT: [[TMP56:%.*]] = load i32, i32* [[M]], align 4 +// CHECK12-NEXT: store i32 [[TMP56]], i32* [[M_CASTED]], align 4 +// CHECK12-NEXT: [[TMP57:%.*]] = load i32, i32* [[M_CASTED]], align 4 // CHECK12-NEXT: [[TMP58:%.*]] = load i32, i32* [[N]], align 4 -// CHECK12-NEXT: store i32 [[TMP58]], i32* [[DOTCAPTURE_EXPR_9]], align 4 -// CHECK12-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 -// CHECK12-NEXT: [[SUB11:%.*]] = sub nsw i32 [[TMP59]], 0 -// CHECK12-NEXT: [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1 -// CHECK12-NEXT: [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1 -// CHECK12-NEXT: store i32 [[SUB13]], i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK12-NEXT: [[TMP60:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK12-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP60]], 1 -// CHECK12-NEXT: [[TMP61:%.*]] = zext i32 [[ADD14]] to i64 -// CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP61]]) -// CHECK12-NEXT: [[TMP62:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l113.region_id, i32 3, i8** [[TMP55]], i8** [[TMP56]], i64* [[TMP57]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK12-NEXT: [[TMP63:%.*]] = icmp ne i32 [[TMP62]], 0 -// CHECK12-NEXT: br i1 [[TMP63]], label [[OMP_OFFLOAD_FAILED15:%.*]], label [[OMP_OFFLOAD_CONT16:%.*]] -// CHECK12: omp_offload.failed15: -// CHECK12-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l113(i32 [[TMP34]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] -// CHECK12-NEXT: br label [[OMP_OFFLOAD_CONT16]] -// CHECK12: omp_offload.cont16: -// CHECK12-NEXT: [[TMP64:%.*]] = load i32, i32* [[M]], align 4 -// CHECK12-NEXT: store i32 [[TMP64]], i32* [[M_CASTED]], align 4 -// CHECK12-NEXT: [[TMP65:%.*]] = load i32, i32* [[M_CASTED]], align 4 -// CHECK12-NEXT: [[TMP66:%.*]] = load i32, i32* [[N]], align 4 -// CHECK12-NEXT: 
store i32 [[TMP66]], i32* [[N_CASTED17]], align 4 -// CHECK12-NEXT: [[TMP67:%.*]] = load i32, i32* [[N_CASTED17]], align 4 -// CHECK12-NEXT: [[TMP68:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK12-NEXT: [[TMP69:%.*]] = sext i32 [[TMP68]] to i64 -// CHECK12-NEXT: [[TMP70:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP71:%.*]] = bitcast i8** [[TMP70]] to i32* -// CHECK12-NEXT: store i32 [[TMP65]], i32* [[TMP71]], align 4 -// CHECK12-NEXT: [[TMP72:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 0 +// CHECK12-NEXT: store i32 [[TMP58]], i32* [[N_CASTED16]], align 4 +// CHECK12-NEXT: [[TMP59:%.*]] = load i32, i32* [[N_CASTED16]], align 4 +// CHECK12-NEXT: [[TMP60:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK12-NEXT: [[TMP61:%.*]] = sext i32 [[TMP60]] to i64 +// CHECK12-NEXT: [[TMP62:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP63:%.*]] = bitcast i8** [[TMP62]] to i32* +// CHECK12-NEXT: store i32 [[TMP57]], i32* [[TMP63]], align 4 +// CHECK12-NEXT: [[TMP64:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP65:%.*]] = bitcast i8** [[TMP64]] to i32* +// CHECK12-NEXT: store i32 [[TMP57]], i32* [[TMP65]], align 4 +// CHECK12-NEXT: [[TMP66:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 0 +// CHECK12-NEXT: store i8* null, i8** [[TMP66]], align 4 +// CHECK12-NEXT: [[TMP67:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 1 +// CHECK12-NEXT: [[TMP68:%.*]] = bitcast i8** [[TMP67]] to i32* +// CHECK12-NEXT: store i32 [[TMP59]], i32* [[TMP68]], align 4 +// CHECK12-NEXT: [[TMP69:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 1 +// CHECK12-NEXT: [[TMP70:%.*]] = bitcast i8** [[TMP69]] to i32* +// CHECK12-NEXT: store i32 [[TMP59]], i32* [[TMP70]], align 4 +// CHECK12-NEXT: [[TMP71:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 1 +// CHECK12-NEXT: store i8* null, i8** [[TMP71]], align 4 +// CHECK12-NEXT: [[TMP72:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 2 // CHECK12-NEXT: [[TMP73:%.*]] = bitcast i8** [[TMP72]] to i32* -// CHECK12-NEXT: store i32 [[TMP65]], i32* [[TMP73]], align 4 -// CHECK12-NEXT: [[TMP74:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES21]], i32 0, i32 0 -// CHECK12-NEXT: store i64 4, i64* [[TMP74]], align 4 -// CHECK12-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 0 -// CHECK12-NEXT: store i8* null, i8** [[TMP75]], align 4 -// CHECK12-NEXT: [[TMP76:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 1 -// CHECK12-NEXT: [[TMP77:%.*]] = bitcast i8** [[TMP76]] to i32* -// CHECK12-NEXT: store i32 [[TMP67]], i32* [[TMP77]], align 4 -// CHECK12-NEXT: [[TMP78:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 1 -// CHECK12-NEXT: [[TMP79:%.*]] = bitcast i8** [[TMP78]] to i32* -// CHECK12-NEXT: store i32 [[TMP67]], i32* [[TMP79]], align 4 -// CHECK12-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES21]], i32 0, i32 1 -// CHECK12-NEXT: store i64 4, i64* [[TMP80]], align 4 -// CHECK12-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], 
i32 0, i32 1 +// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP73]], align 4 +// CHECK12-NEXT: [[TMP74:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 2 +// CHECK12-NEXT: [[TMP75:%.*]] = bitcast i8** [[TMP74]] to i32* +// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP75]], align 4 +// CHECK12-NEXT: [[TMP76:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 2 +// CHECK12-NEXT: store i8* null, i8** [[TMP76]], align 4 +// CHECK12-NEXT: [[TMP77:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 3 +// CHECK12-NEXT: [[TMP78:%.*]] = bitcast i8** [[TMP77]] to i32** +// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP78]], align 4 +// CHECK12-NEXT: [[TMP79:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 3 +// CHECK12-NEXT: [[TMP80:%.*]] = bitcast i8** [[TMP79]] to i32** +// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP80]], align 4 +// CHECK12-NEXT: store i64 [[TMP61]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 3), align 4 +// CHECK12-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 3 // CHECK12-NEXT: store i8* null, i8** [[TMP81]], align 4 -// CHECK12-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 2 -// CHECK12-NEXT: [[TMP83:%.*]] = bitcast i8** [[TMP82]] to i32* -// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP83]], align 4 -// CHECK12-NEXT: [[TMP84:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 2 -// CHECK12-NEXT: [[TMP85:%.*]] = bitcast i8** [[TMP84]] to i32* -// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP85]], align 4 -// CHECK12-NEXT: [[TMP86:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES21]], i32 0, i32 2 -// CHECK12-NEXT: store i64 4, i64* [[TMP86]], align 4 -// CHECK12-NEXT: [[TMP87:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 2 -// CHECK12-NEXT: store i8* null, i8** [[TMP87]], align 4 -// CHECK12-NEXT: [[TMP88:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 3 -// CHECK12-NEXT: [[TMP89:%.*]] = bitcast i8** [[TMP88]] to i32** -// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP89]], align 4 -// CHECK12-NEXT: [[TMP90:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 3 -// CHECK12-NEXT: [[TMP91:%.*]] = bitcast i8** [[TMP90]] to i32** -// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP91]], align 4 -// CHECK12-NEXT: [[TMP92:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES21]], i32 0, i32 3 -// CHECK12-NEXT: store i64 [[TMP69]], i64* [[TMP92]], align 4 -// CHECK12-NEXT: [[TMP93:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 3 -// CHECK12-NEXT: store i8* null, i8** [[TMP93]], align 4 -// CHECK12-NEXT: [[TMP94:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP95:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP96:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES21]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP97:%.*]] = load i32, i32* [[N]], align 4 -// CHECK12-NEXT: store i32 [[TMP97]], i32* [[DOTCAPTURE_EXPR_23]], align 4 -// CHECK12-NEXT: [[TMP98:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_23]], align 4 -// 
CHECK12-NEXT: [[SUB25:%.*]] = sub nsw i32 [[TMP98]], 0 -// CHECK12-NEXT: [[DIV26:%.*]] = sdiv i32 [[SUB25]], 1 -// CHECK12-NEXT: [[SUB27:%.*]] = sub nsw i32 [[DIV26]], 1 -// CHECK12-NEXT: store i32 [[SUB27]], i32* [[DOTCAPTURE_EXPR_24]], align 4 -// CHECK12-NEXT: [[TMP99:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_24]], align 4 -// CHECK12-NEXT: [[ADD28:%.*]] = add nsw i32 [[TMP99]], 1 -// CHECK12-NEXT: [[TMP100:%.*]] = zext i32 [[ADD28]] to i64 -// CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP100]]) -// CHECK12-NEXT: [[TMP101:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l118.region_id, i32 4, i8** [[TMP94]], i8** [[TMP95]], i64* [[TMP96]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.7, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK12-NEXT: [[TMP102:%.*]] = icmp ne i32 [[TMP101]], 0 -// CHECK12-NEXT: br i1 [[TMP102]], label [[OMP_OFFLOAD_FAILED29:%.*]], label [[OMP_OFFLOAD_CONT30:%.*]] -// CHECK12: omp_offload.failed29: -// CHECK12-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l118(i32 [[TMP65]], i32 [[TMP67]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] -// CHECK12-NEXT: br label [[OMP_OFFLOAD_CONT30]] -// CHECK12: omp_offload.cont30: -// CHECK12-NEXT: [[TMP103:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 -// CHECK12-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP103]]) +// CHECK12-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP83:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP84:%.*]] = load i32, i32* [[N]], align 4 +// CHECK12-NEXT: store i32 [[TMP84]], i32* [[DOTCAPTURE_EXPR_21]], align 4 +// CHECK12-NEXT: [[TMP85:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_21]], align 4 +// CHECK12-NEXT: [[SUB23:%.*]] = sub nsw i32 [[TMP85]], 0 +// CHECK12-NEXT: [[DIV24:%.*]] = sdiv i32 [[SUB23]], 1 +// CHECK12-NEXT: [[SUB25:%.*]] = sub nsw i32 [[DIV24]], 1 +// CHECK12-NEXT: store i32 [[SUB25]], i32* [[DOTCAPTURE_EXPR_22]], align 4 +// CHECK12-NEXT: [[TMP86:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_22]], align 4 +// CHECK12-NEXT: [[ADD26:%.*]] = add nsw i32 [[TMP86]], 1 +// CHECK12-NEXT: [[TMP87:%.*]] = zext i32 [[ADD26]] to i64 +// CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP87]]) +// CHECK12-NEXT: [[TMP88:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l118.region_id, i32 4, i8** [[TMP82]], i8** [[TMP83]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK12-NEXT: [[TMP89:%.*]] = icmp ne i32 [[TMP88]], 0 +// CHECK12-NEXT: br i1 [[TMP89]], label [[OMP_OFFLOAD_FAILED27:%.*]], label [[OMP_OFFLOAD_CONT28:%.*]] +// CHECK12: omp_offload.failed27: +// CHECK12-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l118(i32 [[TMP57]], i32 [[TMP59]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] +// CHECK12-NEXT: br label [[OMP_OFFLOAD_CONT28]] +// CHECK12: omp_offload.cont28: +// CHECK12-NEXT: [[TMP90:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 +// CHECK12-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef 
[[TMP90]]) // CHECK12-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK12-NEXT: [[TMP104:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK12-NEXT: call void @llvm.stackrestore(i8* [[TMP104]]) -// CHECK12-NEXT: [[TMP105:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK12-NEXT: ret i32 [[TMP105]] +// CHECK12-NEXT: [[TMP91:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK12-NEXT: call void @llvm.stackrestore(i8* [[TMP91]]) +// CHECK12-NEXT: [[TMP92:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK12-NEXT: ret i32 [[TMP92]] // // // CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l108 @@ -8595,11 +8503,11 @@ // CHECK12-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 // CHECK12-NEXT: store i32 [[TMP3]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK12-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32, i32*, i32)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP4]]) +// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32, i32*, i32)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP4]]) // CHECK12-NEXT: ret void // // -// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..5 +// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..6 // CHECK12-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK12-NEXT: entry: // CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -8677,7 +8585,7 @@ // CHECK12-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4, !llvm.access.group !25 // CHECK12-NEXT: store i32 [[TMP19]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !25 // CHECK12-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !25 -// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32, i32*, i32)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], i32 [[TMP1]], i32* [[TMP2]], i32 [[TMP20]]), !llvm.access.group !25 +// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32, i32*, i32)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], i32 [[TMP1]], i32* [[TMP2]], i32 [[TMP20]]), !llvm.access.group !25 // CHECK12-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK12: omp.inner.for.inc: // CHECK12-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !25 @@ -8731,7 +8639,7 @@ // CHECK12-NEXT: ret void // // -// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..6 +// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..7 // CHECK12-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK12-NEXT: entry: // CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -8879,7 +8787,7 @@ // CHECK12-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK12-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK12-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l86.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK12-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l86.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK12-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK12-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK12: omp_offload.failed: @@ -8897,7 +8805,7 @@ // CHECK12-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0 // CHECK12-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0 // CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK12-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l91.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.13, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.14, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK12-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l91.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* 
getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.16, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK12-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 // CHECK12-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]] // CHECK12: omp_offload.failed5: @@ -8926,7 +8834,7 @@ // CHECK12-NEXT: [[TMP30:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0 // CHECK12-NEXT: [[TMP31:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0 // CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK12-NEXT: [[TMP32:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l96.region_id, i32 2, i8** [[TMP30]], i8** [[TMP31]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.17, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.18, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK12-NEXT: [[TMP32:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l96.region_id, i32 2, i8** [[TMP30]], i8** [[TMP31]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.20, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.21, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK12-NEXT: [[TMP33:%.*]] = icmp ne i32 [[TMP32]], 0 // CHECK12-NEXT: br i1 [[TMP33]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]] // CHECK12: omp_offload.failed11: @@ -8942,11 +8850,11 @@ // CHECK12-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK12-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 // CHECK12-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 -// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..8 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK12-NEXT: ret void // // -// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..8 +// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..10 // CHECK12-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK12-NEXT: entry: // CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -8992,7 +8900,7 @@ // CHECK12: omp.inner.for.body: // CHECK12-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !31 // CHECK12-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !31 -// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]), !llvm.access.group !31 +// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]), !llvm.access.group !31 // CHECK12-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK12: omp.inner.for.inc: // CHECK12-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !31 @@ -9014,7 +8922,7 @@ // CHECK12-NEXT: ret void // // -// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..9 +// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..11 // CHECK12-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK12-NEXT: entry: // CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -9101,11 +9009,11 @@ // CHECK12-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK12-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 // CHECK12-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 -// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK12-NEXT: ret void // // -// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..14 // CHECK12-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK12-NEXT: entry: // CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -9151,7 +9059,7 @@ // CHECK12: omp.inner.for.body: // CHECK12-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !37 // CHECK12-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !37 -// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..12 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]), !llvm.access.group !37 +// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]), !llvm.access.group !37 // CHECK12-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK12: omp.inner.for.inc: // CHECK12-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !37 @@ -9173,7 +9081,7 @@ // CHECK12-NEXT: ret void // // -// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..12 +// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..15 // CHECK12-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK12-NEXT: entry: // CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -9269,11 +9177,11 @@ // CHECK12-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 // CHECK12-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK12-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..15 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP3]]) +// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..18 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP3]]) // CHECK12-NEXT: ret void // // -// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..15 +// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..18 // CHECK12-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK12-NEXT: entry: // CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -9325,7 +9233,7 @@ // CHECK12-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4, !llvm.access.group !43 // CHECK12-NEXT: store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !43 // CHECK12-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !43 -// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..16 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]]), !llvm.access.group !43 +// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..19 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]]), !llvm.access.group !43 // CHECK12-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK12: omp.inner.for.inc: // CHECK12-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !43 @@ -9368,7 +9276,7 @@ // CHECK12-NEXT: ret void // // -// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..16 +// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..19 // CHECK12-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK12-NEXT: entry: // CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 diff --git a/clang/test/OpenMP/teams_distribute_parallel_for_simd_schedule_codegen.cpp b/clang/test/OpenMP/teams_distribute_parallel_for_simd_schedule_codegen.cpp --- a/clang/test/OpenMP/teams_distribute_parallel_for_simd_schedule_codegen.cpp +++ b/clang/test/OpenMP/teams_distribute_parallel_for_simd_schedule_codegen.cpp @@ -8766,7 +8766,6 @@ // CHECK13-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK13-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK13-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8 -// CHECK13-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 8 // CHECK13-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -8774,36 +8773,32 @@ // CHECK13-NEXT: [[DOTOFFLOAD_BASEPTRS5:%.*]] = alloca [3 x i8*], align 8 // CHECK13-NEXT: [[DOTOFFLOAD_PTRS6:%.*]] = alloca [3 x i8*], align 8 // CHECK13-NEXT: [[DOTOFFLOAD_MAPPERS7:%.*]] = alloca [3 x i8*], align 8 -// CHECK13-NEXT: [[DOTOFFLOAD_SIZES8:%.*]] = alloca [3 x i64], align 8 -// CHECK13-NEXT: [[_TMP9:%.*]] = alloca i32, align 4 +// CHECK13-NEXT: [[_TMP8:%.*]] = alloca i32, align 4 +// CHECK13-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4 // CHECK13-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4 -// CHECK13-NEXT: [[DOTCAPTURE_EXPR_11:%.*]] = alloca i32, align 4 // CHECK13-NEXT: [[M_CASTED:%.*]] = alloca i64, align 8 -// CHECK13-NEXT: [[N_CASTED19:%.*]] = alloca i64, align 8 -// CHECK13-NEXT: [[DOTOFFLOAD_BASEPTRS21:%.*]] = alloca [4 x i8*], align 8 -// CHECK13-NEXT: [[DOTOFFLOAD_PTRS22:%.*]] = alloca [4 x i8*], align 8 -// CHECK13-NEXT: [[DOTOFFLOAD_MAPPERS23:%.*]] = alloca [4 x i8*], align 8 -// CHECK13-NEXT: [[DOTOFFLOAD_SIZES24:%.*]] = alloca [4 x i64], align 8 -// CHECK13-NEXT: [[_TMP25:%.*]] = alloca i32, align 4 -// CHECK13-NEXT: [[DOTCAPTURE_EXPR_26:%.*]] = alloca i32, align 4 -// CHECK13-NEXT: [[DOTCAPTURE_EXPR_27:%.*]] = alloca i32, align 4 -// CHECK13-NEXT: [[N_CASTED34:%.*]] = alloca i64, align 8 -// CHECK13-NEXT: [[DOTOFFLOAD_BASEPTRS36:%.*]] = alloca [3 x i8*], align 8 -// CHECK13-NEXT: [[DOTOFFLOAD_PTRS37:%.*]] = alloca [3 x i8*], align 8 -// CHECK13-NEXT: [[DOTOFFLOAD_MAPPERS38:%.*]] = alloca [3 x i8*], align 8 -// CHECK13-NEXT: [[DOTOFFLOAD_SIZES39:%.*]] = alloca [3 x i64], align 8 -// CHECK13-NEXT: [[_TMP40:%.*]] = alloca i32, align 4 -// CHECK13-NEXT: [[DOTCAPTURE_EXPR_41:%.*]] = alloca i32, align 4 -// CHECK13-NEXT: [[DOTCAPTURE_EXPR_42:%.*]] 
= alloca i32, align 4 -// CHECK13-NEXT: [[M_CASTED49:%.*]] = alloca i64, align 8 -// CHECK13-NEXT: [[N_CASTED51:%.*]] = alloca i64, align 8 -// CHECK13-NEXT: [[DOTOFFLOAD_BASEPTRS53:%.*]] = alloca [4 x i8*], align 8 -// CHECK13-NEXT: [[DOTOFFLOAD_PTRS54:%.*]] = alloca [4 x i8*], align 8 -// CHECK13-NEXT: [[DOTOFFLOAD_MAPPERS55:%.*]] = alloca [4 x i8*], align 8 -// CHECK13-NEXT: [[DOTOFFLOAD_SIZES56:%.*]] = alloca [4 x i64], align 8 -// CHECK13-NEXT: [[_TMP57:%.*]] = alloca i32, align 4 -// CHECK13-NEXT: [[DOTCAPTURE_EXPR_58:%.*]] = alloca i32, align 4 -// CHECK13-NEXT: [[DOTCAPTURE_EXPR_59:%.*]] = alloca i32, align 4 +// CHECK13-NEXT: [[N_CASTED18:%.*]] = alloca i64, align 8 +// CHECK13-NEXT: [[DOTOFFLOAD_BASEPTRS20:%.*]] = alloca [4 x i8*], align 8 +// CHECK13-NEXT: [[DOTOFFLOAD_PTRS21:%.*]] = alloca [4 x i8*], align 8 +// CHECK13-NEXT: [[DOTOFFLOAD_MAPPERS22:%.*]] = alloca [4 x i8*], align 8 +// CHECK13-NEXT: [[_TMP23:%.*]] = alloca i32, align 4 +// CHECK13-NEXT: [[DOTCAPTURE_EXPR_24:%.*]] = alloca i32, align 4 +// CHECK13-NEXT: [[DOTCAPTURE_EXPR_25:%.*]] = alloca i32, align 4 +// CHECK13-NEXT: [[N_CASTED32:%.*]] = alloca i64, align 8 +// CHECK13-NEXT: [[DOTOFFLOAD_BASEPTRS34:%.*]] = alloca [3 x i8*], align 8 +// CHECK13-NEXT: [[DOTOFFLOAD_PTRS35:%.*]] = alloca [3 x i8*], align 8 +// CHECK13-NEXT: [[DOTOFFLOAD_MAPPERS36:%.*]] = alloca [3 x i8*], align 8 +// CHECK13-NEXT: [[_TMP37:%.*]] = alloca i32, align 4 +// CHECK13-NEXT: [[DOTCAPTURE_EXPR_38:%.*]] = alloca i32, align 4 +// CHECK13-NEXT: [[DOTCAPTURE_EXPR_39:%.*]] = alloca i32, align 4 +// CHECK13-NEXT: [[M_CASTED46:%.*]] = alloca i64, align 8 +// CHECK13-NEXT: [[N_CASTED48:%.*]] = alloca i64, align 8 +// CHECK13-NEXT: [[DOTOFFLOAD_BASEPTRS50:%.*]] = alloca [4 x i8*], align 8 +// CHECK13-NEXT: [[DOTOFFLOAD_PTRS51:%.*]] = alloca [4 x i8*], align 8 +// CHECK13-NEXT: [[DOTOFFLOAD_MAPPERS52:%.*]] = alloca [4 x i8*], align 8 +// CHECK13-NEXT: [[_TMP53:%.*]] = alloca i32, align 4 +// CHECK13-NEXT: [[DOTCAPTURE_EXPR_54:%.*]] = alloca i32, align 4 +// CHECK13-NEXT: [[DOTCAPTURE_EXPR_55:%.*]] = alloca i32, align 4 // CHECK13-NEXT: store i32 0, i32* [[RETVAL]], align 4 // CHECK13-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 // CHECK13-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 8 @@ -8826,310 +8821,276 @@ // CHECK13-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK13-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i64* // CHECK13-NEXT: store i64 [[TMP4]], i64* [[TMP9]], align 8 -// CHECK13-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK13-NEXT: store i64 4, i64* [[TMP10]], align 8 -// CHECK13-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK13-NEXT: store i8* null, i8** [[TMP11]], align 8 -// CHECK13-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK13-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64* -// CHECK13-NEXT: store i64 [[TMP1]], i64* [[TMP13]], align 8 -// CHECK13-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK13-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64* -// CHECK13-NEXT: store i64 [[TMP1]], i64* [[TMP15]], align 8 -// CHECK13-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK13-NEXT: store i64 8, i64* [[TMP16]], align 8 -// 
CHECK13-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK13-NEXT: store i8* null, i8** [[TMP17]], align 8 -// CHECK13-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK13-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK13-NEXT: store i8* null, i8** [[TMP10]], align 8 +// CHECK13-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK13-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i64* +// CHECK13-NEXT: store i64 [[TMP1]], i64* [[TMP12]], align 8 +// CHECK13-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK13-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i64* +// CHECK13-NEXT: store i64 [[TMP1]], i64* [[TMP14]], align 8 +// CHECK13-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK13-NEXT: store i8* null, i8** [[TMP15]], align 8 +// CHECK13-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK13-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** +// CHECK13-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 8 +// CHECK13-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK13-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** // CHECK13-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 8 -// CHECK13-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK13-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** -// CHECK13-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 8 -// CHECK13-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK13-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 8 -// CHECK13-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK13-NEXT: store i8* null, i8** [[TMP23]], align 8 -// CHECK13-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4 -// CHECK13-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK13-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK13-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 +// CHECK13-NEXT: store i64 [[TMP5]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 2), align 8 +// CHECK13-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK13-NEXT: store i8* null, i8** [[TMP20]], align 8 +// CHECK13-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK13-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK13-NEXT: [[TMP23:%.*]] = load i32, i32* [[N]], align 4 +// CHECK13-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK13-NEXT: [[TMP24:%.*]] = load i32, i32* 
[[DOTCAPTURE_EXPR_]], align 4 +// CHECK13-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK13-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK13-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK13-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK13-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1 -// CHECK13-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 -// CHECK13-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP30]]) -// CHECK13-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l154.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK13-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 -// CHECK13-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK13-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK13-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], 1 +// CHECK13-NEXT: [[TMP26:%.*]] = zext i32 [[ADD]] to i64 +// CHECK13-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP26]]) +// CHECK13-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l154.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK13-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK13-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK13: omp_offload.failed: // CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l154(i64 [[TMP4]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK13-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK13: omp_offload.cont: -// CHECK13-NEXT: [[TMP33:%.*]] = load i32, i32* [[N]], align 4 +// CHECK13-NEXT: [[TMP29:%.*]] = load i32, i32* [[N]], align 4 // CHECK13-NEXT: [[CONV4:%.*]] = bitcast i64* [[N_CASTED3]] to i32* -// CHECK13-NEXT: store i32 [[TMP33]], i32* [[CONV4]], align 4 -// CHECK13-NEXT: [[TMP34:%.*]] = load i64, i64* [[N_CASTED3]], align 8 -// CHECK13-NEXT: [[TMP35:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK13-NEXT: [[TMP36:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i64* -// CHECK13-NEXT: store i64 [[TMP34]], i64* [[TMP37]], align 8 -// CHECK13-NEXT: [[TMP38:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i64* -// CHECK13-NEXT: store i64 [[TMP34]], i64* [[TMP39]], align 8 -// CHECK13-NEXT: [[TMP40:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 0 -// CHECK13-NEXT: store i64 4, i64* [[TMP40]], align 8 -// CHECK13-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 0 +// CHECK13-NEXT: store i32 [[TMP29]], i32* [[CONV4]], align 4 +// CHECK13-NEXT: [[TMP30:%.*]] = load i64, i64* [[N_CASTED3]], align 8 +// 
CHECK13-NEXT: [[TMP31:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK13-NEXT: [[TMP32:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 +// CHECK13-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i64* +// CHECK13-NEXT: store i64 [[TMP30]], i64* [[TMP33]], align 8 +// CHECK13-NEXT: [[TMP34:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 +// CHECK13-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i64* +// CHECK13-NEXT: store i64 [[TMP30]], i64* [[TMP35]], align 8 +// CHECK13-NEXT: [[TMP36:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 0 +// CHECK13-NEXT: store i8* null, i8** [[TMP36]], align 8 +// CHECK13-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1 +// CHECK13-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i64* +// CHECK13-NEXT: store i64 [[TMP1]], i64* [[TMP38]], align 8 +// CHECK13-NEXT: [[TMP39:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 1 +// CHECK13-NEXT: [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i64* +// CHECK13-NEXT: store i64 [[TMP1]], i64* [[TMP40]], align 8 +// CHECK13-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 1 // CHECK13-NEXT: store i8* null, i8** [[TMP41]], align 8 -// CHECK13-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1 -// CHECK13-NEXT: [[TMP43:%.*]] = bitcast i8** [[TMP42]] to i64* -// CHECK13-NEXT: store i64 [[TMP1]], i64* [[TMP43]], align 8 -// CHECK13-NEXT: [[TMP44:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 1 -// CHECK13-NEXT: [[TMP45:%.*]] = bitcast i8** [[TMP44]] to i64* -// CHECK13-NEXT: store i64 [[TMP1]], i64* [[TMP45]], align 8 -// CHECK13-NEXT: [[TMP46:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 1 -// CHECK13-NEXT: store i64 8, i64* [[TMP46]], align 8 -// CHECK13-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 1 -// CHECK13-NEXT: store i8* null, i8** [[TMP47]], align 8 -// CHECK13-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 2 -// CHECK13-NEXT: [[TMP49:%.*]] = bitcast i8** [[TMP48]] to i32** -// CHECK13-NEXT: store i32* [[VLA]], i32** [[TMP49]], align 8 -// CHECK13-NEXT: [[TMP50:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 2 -// CHECK13-NEXT: [[TMP51:%.*]] = bitcast i8** [[TMP50]] to i32** -// CHECK13-NEXT: store i32* [[VLA]], i32** [[TMP51]], align 8 -// CHECK13-NEXT: [[TMP52:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 2 -// CHECK13-NEXT: store i64 [[TMP35]], i64* [[TMP52]], align 8 -// CHECK13-NEXT: [[TMP53:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 2 -// CHECK13-NEXT: store i8* null, i8** [[TMP53]], align 8 -// CHECK13-NEXT: [[TMP54:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP55:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP56:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 0 +// CHECK13-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 2 +// CHECK13-NEXT: [[TMP43:%.*]] = 
bitcast i8** [[TMP42]] to i32** +// CHECK13-NEXT: store i32* [[VLA]], i32** [[TMP43]], align 8 +// CHECK13-NEXT: [[TMP44:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 2 +// CHECK13-NEXT: [[TMP45:%.*]] = bitcast i8** [[TMP44]] to i32** +// CHECK13-NEXT: store i32* [[VLA]], i32** [[TMP45]], align 8 +// CHECK13-NEXT: store i64 [[TMP31]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 2), align 8 +// CHECK13-NEXT: [[TMP46:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 2 +// CHECK13-NEXT: store i8* null, i8** [[TMP46]], align 8 +// CHECK13-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 +// CHECK13-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 +// CHECK13-NEXT: [[TMP49:%.*]] = load i32, i32* [[N]], align 4 +// CHECK13-NEXT: store i32 [[TMP49]], i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK13-NEXT: [[TMP50:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK13-NEXT: [[SUB11:%.*]] = sub nsw i32 [[TMP50]], 0 +// CHECK13-NEXT: [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1 +// CHECK13-NEXT: [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1 +// CHECK13-NEXT: store i32 [[SUB13]], i32* [[DOTCAPTURE_EXPR_10]], align 4 +// CHECK13-NEXT: [[TMP51:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 +// CHECK13-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP51]], 1 +// CHECK13-NEXT: [[TMP52:%.*]] = zext i32 [[ADD14]] to i64 +// CHECK13-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP52]]) +// CHECK13-NEXT: [[TMP53:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159.region_id, i32 3, i8** [[TMP47]], i8** [[TMP48]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK13-NEXT: [[TMP54:%.*]] = icmp ne i32 [[TMP53]], 0 +// CHECK13-NEXT: br i1 [[TMP54]], label [[OMP_OFFLOAD_FAILED15:%.*]], label [[OMP_OFFLOAD_CONT16:%.*]] +// CHECK13: omp_offload.failed15: +// CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159(i64 [[TMP30]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] +// CHECK13-NEXT: br label [[OMP_OFFLOAD_CONT16]] +// CHECK13: omp_offload.cont16: +// CHECK13-NEXT: [[TMP55:%.*]] = load i32, i32* [[M]], align 4 +// CHECK13-NEXT: [[CONV17:%.*]] = bitcast i64* [[M_CASTED]] to i32* +// CHECK13-NEXT: store i32 [[TMP55]], i32* [[CONV17]], align 4 +// CHECK13-NEXT: [[TMP56:%.*]] = load i64, i64* [[M_CASTED]], align 8 // CHECK13-NEXT: [[TMP57:%.*]] = load i32, i32* [[N]], align 4 -// CHECK13-NEXT: store i32 [[TMP57]], i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK13-NEXT: [[TMP58:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK13-NEXT: [[SUB12:%.*]] = sub nsw i32 [[TMP58]], 0 -// CHECK13-NEXT: [[DIV13:%.*]] = sdiv i32 [[SUB12]], 1 -// CHECK13-NEXT: [[SUB14:%.*]] = sub nsw i32 [[DIV13]], 1 -// CHECK13-NEXT: store i32 [[SUB14]], i32* [[DOTCAPTURE_EXPR_11]], align 4 -// CHECK13-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_11]], align 4 -// CHECK13-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP59]], 1 -// CHECK13-NEXT: [[TMP60:%.*]] = zext i32 [[ADD15]] to i64 -// CHECK13-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 
[[TMP60]]) -// CHECK13-NEXT: [[TMP61:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159.region_id, i32 3, i8** [[TMP54]], i8** [[TMP55]], i64* [[TMP56]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK13-NEXT: [[TMP62:%.*]] = icmp ne i32 [[TMP61]], 0 -// CHECK13-NEXT: br i1 [[TMP62]], label [[OMP_OFFLOAD_FAILED16:%.*]], label [[OMP_OFFLOAD_CONT17:%.*]] -// CHECK13: omp_offload.failed16: -// CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159(i64 [[TMP34]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] -// CHECK13-NEXT: br label [[OMP_OFFLOAD_CONT17]] -// CHECK13: omp_offload.cont17: -// CHECK13-NEXT: [[TMP63:%.*]] = load i32, i32* [[M]], align 4 -// CHECK13-NEXT: [[CONV18:%.*]] = bitcast i64* [[M_CASTED]] to i32* -// CHECK13-NEXT: store i32 [[TMP63]], i32* [[CONV18]], align 4 -// CHECK13-NEXT: [[TMP64:%.*]] = load i64, i64* [[M_CASTED]], align 8 -// CHECK13-NEXT: [[TMP65:%.*]] = load i32, i32* [[N]], align 4 -// CHECK13-NEXT: [[CONV20:%.*]] = bitcast i64* [[N_CASTED19]] to i32* -// CHECK13-NEXT: store i32 [[TMP65]], i32* [[CONV20]], align 4 -// CHECK13-NEXT: [[TMP66:%.*]] = load i64, i64* [[N_CASTED19]], align 8 -// CHECK13-NEXT: [[TMP67:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK13-NEXT: [[TMP68:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP69:%.*]] = bitcast i8** [[TMP68]] to i64* -// CHECK13-NEXT: store i64 [[TMP64]], i64* [[TMP69]], align 8 -// CHECK13-NEXT: [[TMP70:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0 +// CHECK13-NEXT: [[CONV19:%.*]] = bitcast i64* [[N_CASTED18]] to i32* +// CHECK13-NEXT: store i32 [[TMP57]], i32* [[CONV19]], align 4 +// CHECK13-NEXT: [[TMP58:%.*]] = load i64, i64* [[N_CASTED18]], align 8 +// CHECK13-NEXT: [[TMP59:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK13-NEXT: [[TMP60:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0 +// CHECK13-NEXT: [[TMP61:%.*]] = bitcast i8** [[TMP60]] to i64* +// CHECK13-NEXT: store i64 [[TMP56]], i64* [[TMP61]], align 8 +// CHECK13-NEXT: [[TMP62:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0 +// CHECK13-NEXT: [[TMP63:%.*]] = bitcast i8** [[TMP62]] to i64* +// CHECK13-NEXT: store i64 [[TMP56]], i64* [[TMP63]], align 8 +// CHECK13-NEXT: [[TMP64:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 0 +// CHECK13-NEXT: store i8* null, i8** [[TMP64]], align 8 +// CHECK13-NEXT: [[TMP65:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 1 +// CHECK13-NEXT: [[TMP66:%.*]] = bitcast i8** [[TMP65]] to i64* +// CHECK13-NEXT: store i64 [[TMP58]], i64* [[TMP66]], align 8 +// CHECK13-NEXT: [[TMP67:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 1 +// CHECK13-NEXT: [[TMP68:%.*]] = bitcast i8** [[TMP67]] to i64* +// CHECK13-NEXT: store i64 [[TMP58]], i64* [[TMP68]], align 8 +// CHECK13-NEXT: [[TMP69:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 1 +// CHECK13-NEXT: store i8* null, i8** [[TMP69]], align 8 +// CHECK13-NEXT: [[TMP70:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 2 // CHECK13-NEXT: [[TMP71:%.*]] = bitcast i8** [[TMP70]] to i64* -// CHECK13-NEXT: store i64 
[[TMP64]], i64* [[TMP71]], align 8 -// CHECK13-NEXT: [[TMP72:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES24]], i32 0, i32 0 -// CHECK13-NEXT: store i64 4, i64* [[TMP72]], align 8 -// CHECK13-NEXT: [[TMP73:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 0 -// CHECK13-NEXT: store i8* null, i8** [[TMP73]], align 8 -// CHECK13-NEXT: [[TMP74:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 1 -// CHECK13-NEXT: [[TMP75:%.*]] = bitcast i8** [[TMP74]] to i64* -// CHECK13-NEXT: store i64 [[TMP66]], i64* [[TMP75]], align 8 -// CHECK13-NEXT: [[TMP76:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 1 -// CHECK13-NEXT: [[TMP77:%.*]] = bitcast i8** [[TMP76]] to i64* -// CHECK13-NEXT: store i64 [[TMP66]], i64* [[TMP77]], align 8 -// CHECK13-NEXT: [[TMP78:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES24]], i32 0, i32 1 -// CHECK13-NEXT: store i64 4, i64* [[TMP78]], align 8 -// CHECK13-NEXT: [[TMP79:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 1 +// CHECK13-NEXT: store i64 [[TMP1]], i64* [[TMP71]], align 8 +// CHECK13-NEXT: [[TMP72:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 2 +// CHECK13-NEXT: [[TMP73:%.*]] = bitcast i8** [[TMP72]] to i64* +// CHECK13-NEXT: store i64 [[TMP1]], i64* [[TMP73]], align 8 +// CHECK13-NEXT: [[TMP74:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 2 +// CHECK13-NEXT: store i8* null, i8** [[TMP74]], align 8 +// CHECK13-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 3 +// CHECK13-NEXT: [[TMP76:%.*]] = bitcast i8** [[TMP75]] to i32** +// CHECK13-NEXT: store i32* [[VLA]], i32** [[TMP76]], align 8 +// CHECK13-NEXT: [[TMP77:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 3 +// CHECK13-NEXT: [[TMP78:%.*]] = bitcast i8** [[TMP77]] to i32** +// CHECK13-NEXT: store i32* [[VLA]], i32** [[TMP78]], align 8 +// CHECK13-NEXT: store i64 [[TMP59]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 3), align 8 +// CHECK13-NEXT: [[TMP79:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 3 // CHECK13-NEXT: store i8* null, i8** [[TMP79]], align 8 -// CHECK13-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 2 -// CHECK13-NEXT: [[TMP81:%.*]] = bitcast i8** [[TMP80]] to i64* -// CHECK13-NEXT: store i64 [[TMP1]], i64* [[TMP81]], align 8 -// CHECK13-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 2 -// CHECK13-NEXT: [[TMP83:%.*]] = bitcast i8** [[TMP82]] to i64* -// CHECK13-NEXT: store i64 [[TMP1]], i64* [[TMP83]], align 8 -// CHECK13-NEXT: [[TMP84:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES24]], i32 0, i32 2 -// CHECK13-NEXT: store i64 8, i64* [[TMP84]], align 8 -// CHECK13-NEXT: [[TMP85:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 2 -// CHECK13-NEXT: store i8* null, i8** [[TMP85]], align 8 -// CHECK13-NEXT: [[TMP86:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 3 -// CHECK13-NEXT: [[TMP87:%.*]] = bitcast i8** [[TMP86]] to i32** -// CHECK13-NEXT: store i32* [[VLA]], i32** [[TMP87]], align 8 -// CHECK13-NEXT: [[TMP88:%.*]] 
= getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 3 -// CHECK13-NEXT: [[TMP89:%.*]] = bitcast i8** [[TMP88]] to i32** -// CHECK13-NEXT: store i32* [[VLA]], i32** [[TMP89]], align 8 -// CHECK13-NEXT: [[TMP90:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES24]], i32 0, i32 3 -// CHECK13-NEXT: store i64 [[TMP67]], i64* [[TMP90]], align 8 -// CHECK13-NEXT: [[TMP91:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 3 -// CHECK13-NEXT: store i8* null, i8** [[TMP91]], align 8 -// CHECK13-NEXT: [[TMP92:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP93:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP94:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES24]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP95:%.*]] = load i32, i32* [[N]], align 4 -// CHECK13-NEXT: store i32 [[TMP95]], i32* [[DOTCAPTURE_EXPR_26]], align 4 -// CHECK13-NEXT: [[TMP96:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_26]], align 4 -// CHECK13-NEXT: [[SUB28:%.*]] = sub nsw i32 [[TMP96]], 0 -// CHECK13-NEXT: [[DIV29:%.*]] = sdiv i32 [[SUB28]], 1 -// CHECK13-NEXT: [[SUB30:%.*]] = sub nsw i32 [[DIV29]], 1 -// CHECK13-NEXT: store i32 [[SUB30]], i32* [[DOTCAPTURE_EXPR_27]], align 4 -// CHECK13-NEXT: [[TMP97:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_27]], align 4 -// CHECK13-NEXT: [[ADD31:%.*]] = add nsw i32 [[TMP97]], 1 -// CHECK13-NEXT: [[TMP98:%.*]] = zext i32 [[ADD31]] to i64 -// CHECK13-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP98]]) -// CHECK13-NEXT: [[TMP99:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l164.region_id, i32 4, i8** [[TMP92]], i8** [[TMP93]], i64* [[TMP94]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.7, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK13-NEXT: [[TMP100:%.*]] = icmp ne i32 [[TMP99]], 0 -// CHECK13-NEXT: br i1 [[TMP100]], label [[OMP_OFFLOAD_FAILED32:%.*]], label [[OMP_OFFLOAD_CONT33:%.*]] -// CHECK13: omp_offload.failed32: -// CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l164(i64 [[TMP64]], i64 [[TMP66]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] -// CHECK13-NEXT: br label [[OMP_OFFLOAD_CONT33]] -// CHECK13: omp_offload.cont33: -// CHECK13-NEXT: [[TMP101:%.*]] = load i32, i32* [[N]], align 4 -// CHECK13-NEXT: [[CONV35:%.*]] = bitcast i64* [[N_CASTED34]] to i32* -// CHECK13-NEXT: store i32 [[TMP101]], i32* [[CONV35]], align 4 -// CHECK13-NEXT: [[TMP102:%.*]] = load i64, i64* [[N_CASTED34]], align 8 -// CHECK13-NEXT: [[TMP103:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK13-NEXT: [[TMP104:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS36]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP105:%.*]] = bitcast i8** [[TMP104]] to i64* -// CHECK13-NEXT: store i64 [[TMP102]], i64* [[TMP105]], align 8 -// CHECK13-NEXT: [[TMP106:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS37]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP107:%.*]] = bitcast i8** [[TMP106]] to i64* -// CHECK13-NEXT: store i64 [[TMP102]], i64* [[TMP107]], align 8 -// CHECK13-NEXT: [[TMP108:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES39]], i32 0, i32 0 -// CHECK13-NEXT: store i64 4, i64* [[TMP108]], align 8 -// CHECK13-NEXT: [[TMP109:%.*]] = getelementptr 
inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS38]], i64 0, i64 0 -// CHECK13-NEXT: store i8* null, i8** [[TMP109]], align 8 -// CHECK13-NEXT: [[TMP110:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS36]], i32 0, i32 1 -// CHECK13-NEXT: [[TMP111:%.*]] = bitcast i8** [[TMP110]] to i64* -// CHECK13-NEXT: store i64 [[TMP1]], i64* [[TMP111]], align 8 -// CHECK13-NEXT: [[TMP112:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS37]], i32 0, i32 1 -// CHECK13-NEXT: [[TMP113:%.*]] = bitcast i8** [[TMP112]] to i64* -// CHECK13-NEXT: store i64 [[TMP1]], i64* [[TMP113]], align 8 -// CHECK13-NEXT: [[TMP114:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES39]], i32 0, i32 1 -// CHECK13-NEXT: store i64 8, i64* [[TMP114]], align 8 -// CHECK13-NEXT: [[TMP115:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS38]], i64 0, i64 1 -// CHECK13-NEXT: store i8* null, i8** [[TMP115]], align 8 -// CHECK13-NEXT: [[TMP116:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS36]], i32 0, i32 2 -// CHECK13-NEXT: [[TMP117:%.*]] = bitcast i8** [[TMP116]] to i32** -// CHECK13-NEXT: store i32* [[VLA]], i32** [[TMP117]], align 8 -// CHECK13-NEXT: [[TMP118:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS37]], i32 0, i32 2 -// CHECK13-NEXT: [[TMP119:%.*]] = bitcast i8** [[TMP118]] to i32** -// CHECK13-NEXT: store i32* [[VLA]], i32** [[TMP119]], align 8 -// CHECK13-NEXT: [[TMP120:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES39]], i32 0, i32 2 -// CHECK13-NEXT: store i64 [[TMP103]], i64* [[TMP120]], align 8 -// CHECK13-NEXT: [[TMP121:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS38]], i64 0, i64 2 -// CHECK13-NEXT: store i8* null, i8** [[TMP121]], align 8 -// CHECK13-NEXT: [[TMP122:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS36]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP123:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS37]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP124:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES39]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP125:%.*]] = load i32, i32* [[N]], align 4 -// CHECK13-NEXT: store i32 [[TMP125]], i32* [[DOTCAPTURE_EXPR_41]], align 4 -// CHECK13-NEXT: [[TMP126:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_41]], align 4 -// CHECK13-NEXT: [[SUB43:%.*]] = sub nsw i32 [[TMP126]], 0 -// CHECK13-NEXT: [[DIV44:%.*]] = sdiv i32 [[SUB43]], 1 -// CHECK13-NEXT: [[SUB45:%.*]] = sub nsw i32 [[DIV44]], 1 -// CHECK13-NEXT: store i32 [[SUB45]], i32* [[DOTCAPTURE_EXPR_42]], align 4 -// CHECK13-NEXT: [[TMP127:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_42]], align 4 -// CHECK13-NEXT: [[ADD46:%.*]] = add nsw i32 [[TMP127]], 1 -// CHECK13-NEXT: [[TMP128:%.*]] = zext i32 [[ADD46]] to i64 -// CHECK13-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP128]]) -// CHECK13-NEXT: [[TMP129:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l169.region_id, i32 3, i8** [[TMP122]], i8** [[TMP123]], i64* [[TMP124]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK13-NEXT: [[TMP130:%.*]] = icmp ne i32 [[TMP129]], 0 -// CHECK13-NEXT: br i1 [[TMP130]], label [[OMP_OFFLOAD_FAILED47:%.*]], label [[OMP_OFFLOAD_CONT48:%.*]] -// CHECK13: omp_offload.failed47: -// 
CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l169(i64 [[TMP102]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] -// CHECK13-NEXT: br label [[OMP_OFFLOAD_CONT48]] -// CHECK13: omp_offload.cont48: -// CHECK13-NEXT: [[TMP131:%.*]] = load i32, i32* [[M]], align 4 -// CHECK13-NEXT: [[CONV50:%.*]] = bitcast i64* [[M_CASTED49]] to i32* -// CHECK13-NEXT: store i32 [[TMP131]], i32* [[CONV50]], align 4 -// CHECK13-NEXT: [[TMP132:%.*]] = load i64, i64* [[M_CASTED49]], align 8 -// CHECK13-NEXT: [[TMP133:%.*]] = load i32, i32* [[N]], align 4 -// CHECK13-NEXT: [[CONV52:%.*]] = bitcast i64* [[N_CASTED51]] to i32* -// CHECK13-NEXT: store i32 [[TMP133]], i32* [[CONV52]], align 4 -// CHECK13-NEXT: [[TMP134:%.*]] = load i64, i64* [[N_CASTED51]], align 8 -// CHECK13-NEXT: [[TMP135:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK13-NEXT: [[TMP136:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS53]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP137:%.*]] = bitcast i8** [[TMP136]] to i64* -// CHECK13-NEXT: store i64 [[TMP132]], i64* [[TMP137]], align 8 -// CHECK13-NEXT: [[TMP138:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS54]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP139:%.*]] = bitcast i8** [[TMP138]] to i64* -// CHECK13-NEXT: store i64 [[TMP132]], i64* [[TMP139]], align 8 -// CHECK13-NEXT: [[TMP140:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES56]], i32 0, i32 0 -// CHECK13-NEXT: store i64 4, i64* [[TMP140]], align 8 -// CHECK13-NEXT: [[TMP141:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS55]], i64 0, i64 0 -// CHECK13-NEXT: store i8* null, i8** [[TMP141]], align 8 -// CHECK13-NEXT: [[TMP142:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS53]], i32 0, i32 1 -// CHECK13-NEXT: [[TMP143:%.*]] = bitcast i8** [[TMP142]] to i64* -// CHECK13-NEXT: store i64 [[TMP134]], i64* [[TMP143]], align 8 -// CHECK13-NEXT: [[TMP144:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS54]], i32 0, i32 1 -// CHECK13-NEXT: [[TMP145:%.*]] = bitcast i8** [[TMP144]] to i64* -// CHECK13-NEXT: store i64 [[TMP134]], i64* [[TMP145]], align 8 -// CHECK13-NEXT: [[TMP146:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES56]], i32 0, i32 1 -// CHECK13-NEXT: store i64 4, i64* [[TMP146]], align 8 -// CHECK13-NEXT: [[TMP147:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS55]], i64 0, i64 1 -// CHECK13-NEXT: store i8* null, i8** [[TMP147]], align 8 -// CHECK13-NEXT: [[TMP148:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS53]], i32 0, i32 2 -// CHECK13-NEXT: [[TMP149:%.*]] = bitcast i8** [[TMP148]] to i64* -// CHECK13-NEXT: store i64 [[TMP1]], i64* [[TMP149]], align 8 -// CHECK13-NEXT: [[TMP150:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS54]], i32 0, i32 2 -// CHECK13-NEXT: [[TMP151:%.*]] = bitcast i8** [[TMP150]] to i64* -// CHECK13-NEXT: store i64 [[TMP1]], i64* [[TMP151]], align 8 -// CHECK13-NEXT: [[TMP152:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES56]], i32 0, i32 2 -// CHECK13-NEXT: store i64 8, i64* [[TMP152]], align 8 -// CHECK13-NEXT: [[TMP153:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS55]], i64 0, i64 2 -// CHECK13-NEXT: store i8* null, i8** [[TMP153]], align 8 -// CHECK13-NEXT: [[TMP154:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS53]], i32 0, i32 3 -// CHECK13-NEXT: [[TMP155:%.*]] = bitcast i8** [[TMP154]] to 
i32** -// CHECK13-NEXT: store i32* [[VLA]], i32** [[TMP155]], align 8 -// CHECK13-NEXT: [[TMP156:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS54]], i32 0, i32 3 -// CHECK13-NEXT: [[TMP157:%.*]] = bitcast i8** [[TMP156]] to i32** -// CHECK13-NEXT: store i32* [[VLA]], i32** [[TMP157]], align 8 -// CHECK13-NEXT: [[TMP158:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES56]], i32 0, i32 3 -// CHECK13-NEXT: store i64 [[TMP135]], i64* [[TMP158]], align 8 -// CHECK13-NEXT: [[TMP159:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS55]], i64 0, i64 3 -// CHECK13-NEXT: store i8* null, i8** [[TMP159]], align 8 -// CHECK13-NEXT: [[TMP160:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS53]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP161:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS54]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP162:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES56]], i32 0, i32 0 -// CHECK13-NEXT: [[TMP163:%.*]] = load i32, i32* [[N]], align 4 -// CHECK13-NEXT: store i32 [[TMP163]], i32* [[DOTCAPTURE_EXPR_58]], align 4 -// CHECK13-NEXT: [[TMP164:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_58]], align 4 -// CHECK13-NEXT: [[SUB60:%.*]] = sub nsw i32 [[TMP164]], 0 -// CHECK13-NEXT: [[DIV61:%.*]] = sdiv i32 [[SUB60]], 1 -// CHECK13-NEXT: [[SUB62:%.*]] = sub nsw i32 [[DIV61]], 1 -// CHECK13-NEXT: store i32 [[SUB62]], i32* [[DOTCAPTURE_EXPR_59]], align 4 -// CHECK13-NEXT: [[TMP165:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_59]], align 4 -// CHECK13-NEXT: [[ADD63:%.*]] = add nsw i32 [[TMP165]], 1 -// CHECK13-NEXT: [[TMP166:%.*]] = zext i32 [[ADD63]] to i64 -// CHECK13-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP166]]) -// CHECK13-NEXT: [[TMP167:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l174.region_id, i32 4, i8** [[TMP160]], i8** [[TMP161]], i64* [[TMP162]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK13-NEXT: [[TMP168:%.*]] = icmp ne i32 [[TMP167]], 0 -// CHECK13-NEXT: br i1 [[TMP168]], label [[OMP_OFFLOAD_FAILED64:%.*]], label [[OMP_OFFLOAD_CONT65:%.*]] -// CHECK13: omp_offload.failed64: -// CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l174(i64 [[TMP132]], i64 [[TMP134]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] -// CHECK13-NEXT: br label [[OMP_OFFLOAD_CONT65]] -// CHECK13: omp_offload.cont65: -// CHECK13-NEXT: [[TMP169:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 -// CHECK13-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP169]]) +// CHECK13-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0 +// CHECK13-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0 +// CHECK13-NEXT: [[TMP82:%.*]] = load i32, i32* [[N]], align 4 +// CHECK13-NEXT: store i32 [[TMP82]], i32* [[DOTCAPTURE_EXPR_24]], align 4 +// CHECK13-NEXT: [[TMP83:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_24]], align 4 +// CHECK13-NEXT: [[SUB26:%.*]] = sub nsw i32 [[TMP83]], 0 +// CHECK13-NEXT: [[DIV27:%.*]] = sdiv i32 [[SUB26]], 1 +// CHECK13-NEXT: [[SUB28:%.*]] = sub nsw i32 [[DIV27]], 1 +// CHECK13-NEXT: store i32 [[SUB28]], i32* [[DOTCAPTURE_EXPR_25]], align 4 +// CHECK13-NEXT: [[TMP84:%.*]] = load i32, 
i32* [[DOTCAPTURE_EXPR_25]], align 4 +// CHECK13-NEXT: [[ADD29:%.*]] = add nsw i32 [[TMP84]], 1 +// CHECK13-NEXT: [[TMP85:%.*]] = zext i32 [[ADD29]] to i64 +// CHECK13-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP85]]) +// CHECK13-NEXT: [[TMP86:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l164.region_id, i32 4, i8** [[TMP80]], i8** [[TMP81]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK13-NEXT: [[TMP87:%.*]] = icmp ne i32 [[TMP86]], 0 +// CHECK13-NEXT: br i1 [[TMP87]], label [[OMP_OFFLOAD_FAILED30:%.*]], label [[OMP_OFFLOAD_CONT31:%.*]] +// CHECK13: omp_offload.failed30: +// CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l164(i64 [[TMP56]], i64 [[TMP58]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] +// CHECK13-NEXT: br label [[OMP_OFFLOAD_CONT31]] +// CHECK13: omp_offload.cont31: +// CHECK13-NEXT: [[TMP88:%.*]] = load i32, i32* [[N]], align 4 +// CHECK13-NEXT: [[CONV33:%.*]] = bitcast i64* [[N_CASTED32]] to i32* +// CHECK13-NEXT: store i32 [[TMP88]], i32* [[CONV33]], align 4 +// CHECK13-NEXT: [[TMP89:%.*]] = load i64, i64* [[N_CASTED32]], align 8 +// CHECK13-NEXT: [[TMP90:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK13-NEXT: [[TMP91:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 0 +// CHECK13-NEXT: [[TMP92:%.*]] = bitcast i8** [[TMP91]] to i64* +// CHECK13-NEXT: store i64 [[TMP89]], i64* [[TMP92]], align 8 +// CHECK13-NEXT: [[TMP93:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS35]], i32 0, i32 0 +// CHECK13-NEXT: [[TMP94:%.*]] = bitcast i8** [[TMP93]] to i64* +// CHECK13-NEXT: store i64 [[TMP89]], i64* [[TMP94]], align 8 +// CHECK13-NEXT: [[TMP95:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS36]], i64 0, i64 0 +// CHECK13-NEXT: store i8* null, i8** [[TMP95]], align 8 +// CHECK13-NEXT: [[TMP96:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 1 +// CHECK13-NEXT: [[TMP97:%.*]] = bitcast i8** [[TMP96]] to i64* +// CHECK13-NEXT: store i64 [[TMP1]], i64* [[TMP97]], align 8 +// CHECK13-NEXT: [[TMP98:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS35]], i32 0, i32 1 +// CHECK13-NEXT: [[TMP99:%.*]] = bitcast i8** [[TMP98]] to i64* +// CHECK13-NEXT: store i64 [[TMP1]], i64* [[TMP99]], align 8 +// CHECK13-NEXT: [[TMP100:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS36]], i64 0, i64 1 +// CHECK13-NEXT: store i8* null, i8** [[TMP100]], align 8 +// CHECK13-NEXT: [[TMP101:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 2 +// CHECK13-NEXT: [[TMP102:%.*]] = bitcast i8** [[TMP101]] to i32** +// CHECK13-NEXT: store i32* [[VLA]], i32** [[TMP102]], align 8 +// CHECK13-NEXT: [[TMP103:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS35]], i32 0, i32 2 +// CHECK13-NEXT: [[TMP104:%.*]] = bitcast i8** [[TMP103]] to i32** +// CHECK13-NEXT: store i32* [[VLA]], i32** [[TMP104]], align 8 +// CHECK13-NEXT: store i64 [[TMP90]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.12, i32 0, i32 2), align 8 +// CHECK13-NEXT: [[TMP105:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS36]], i64 0, i64 2 +// 
CHECK13-NEXT: store i8* null, i8** [[TMP105]], align 8 +// CHECK13-NEXT: [[TMP106:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 0 +// CHECK13-NEXT: [[TMP107:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS35]], i32 0, i32 0 +// CHECK13-NEXT: [[TMP108:%.*]] = load i32, i32* [[N]], align 4 +// CHECK13-NEXT: store i32 [[TMP108]], i32* [[DOTCAPTURE_EXPR_38]], align 4 +// CHECK13-NEXT: [[TMP109:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_38]], align 4 +// CHECK13-NEXT: [[SUB40:%.*]] = sub nsw i32 [[TMP109]], 0 +// CHECK13-NEXT: [[DIV41:%.*]] = sdiv i32 [[SUB40]], 1 +// CHECK13-NEXT: [[SUB42:%.*]] = sub nsw i32 [[DIV41]], 1 +// CHECK13-NEXT: store i32 [[SUB42]], i32* [[DOTCAPTURE_EXPR_39]], align 4 +// CHECK13-NEXT: [[TMP110:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_39]], align 4 +// CHECK13-NEXT: [[ADD43:%.*]] = add nsw i32 [[TMP110]], 1 +// CHECK13-NEXT: [[TMP111:%.*]] = zext i32 [[ADD43]] to i64 +// CHECK13-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP111]]) +// CHECK13-NEXT: [[TMP112:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l169.region_id, i32 3, i8** [[TMP106]], i8** [[TMP107]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK13-NEXT: [[TMP113:%.*]] = icmp ne i32 [[TMP112]], 0 +// CHECK13-NEXT: br i1 [[TMP113]], label [[OMP_OFFLOAD_FAILED44:%.*]], label [[OMP_OFFLOAD_CONT45:%.*]] +// CHECK13: omp_offload.failed44: +// CHECK13-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l169(i64 [[TMP89]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] +// CHECK13-NEXT: br label [[OMP_OFFLOAD_CONT45]] +// CHECK13: omp_offload.cont45: +// CHECK13-NEXT: [[TMP114:%.*]] = load i32, i32* [[M]], align 4 +// CHECK13-NEXT: [[CONV47:%.*]] = bitcast i64* [[M_CASTED46]] to i32* +// CHECK13-NEXT: store i32 [[TMP114]], i32* [[CONV47]], align 4 +// CHECK13-NEXT: [[TMP115:%.*]] = load i64, i64* [[M_CASTED46]], align 8 +// CHECK13-NEXT: [[TMP116:%.*]] = load i32, i32* [[N]], align 4 +// CHECK13-NEXT: [[CONV49:%.*]] = bitcast i64* [[N_CASTED48]] to i32* +// CHECK13-NEXT: store i32 [[TMP116]], i32* [[CONV49]], align 4 +// CHECK13-NEXT: [[TMP117:%.*]] = load i64, i64* [[N_CASTED48]], align 8 +// CHECK13-NEXT: [[TMP118:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK13-NEXT: [[TMP119:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 0 +// CHECK13-NEXT: [[TMP120:%.*]] = bitcast i8** [[TMP119]] to i64* +// CHECK13-NEXT: store i64 [[TMP115]], i64* [[TMP120]], align 8 +// CHECK13-NEXT: [[TMP121:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 0 +// CHECK13-NEXT: [[TMP122:%.*]] = bitcast i8** [[TMP121]] to i64* +// CHECK13-NEXT: store i64 [[TMP115]], i64* [[TMP122]], align 8 +// CHECK13-NEXT: [[TMP123:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 0 +// CHECK13-NEXT: store i8* null, i8** [[TMP123]], align 8 +// CHECK13-NEXT: [[TMP124:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 1 +// CHECK13-NEXT: [[TMP125:%.*]] = bitcast i8** [[TMP124]] to i64* +// CHECK13-NEXT: store i64 [[TMP117]], i64* [[TMP125]], align 8 +// CHECK13-NEXT: [[TMP126:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* 
[[DOTOFFLOAD_PTRS51]], i32 0, i32 1 +// CHECK13-NEXT: [[TMP127:%.*]] = bitcast i8** [[TMP126]] to i64* +// CHECK13-NEXT: store i64 [[TMP117]], i64* [[TMP127]], align 8 +// CHECK13-NEXT: [[TMP128:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 1 +// CHECK13-NEXT: store i8* null, i8** [[TMP128]], align 8 +// CHECK13-NEXT: [[TMP129:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 2 +// CHECK13-NEXT: [[TMP130:%.*]] = bitcast i8** [[TMP129]] to i64* +// CHECK13-NEXT: store i64 [[TMP1]], i64* [[TMP130]], align 8 +// CHECK13-NEXT: [[TMP131:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 2 +// CHECK13-NEXT: [[TMP132:%.*]] = bitcast i8** [[TMP131]] to i64* +// CHECK13-NEXT: store i64 [[TMP1]], i64* [[TMP132]], align 8 +// CHECK13-NEXT: [[TMP133:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 2 +// CHECK13-NEXT: store i8* null, i8** [[TMP133]], align 8 +// CHECK13-NEXT: [[TMP134:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 3 +// CHECK13-NEXT: [[TMP135:%.*]] = bitcast i8** [[TMP134]] to i32** +// CHECK13-NEXT: store i32* [[VLA]], i32** [[TMP135]], align 8 +// CHECK13-NEXT: [[TMP136:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 3 +// CHECK13-NEXT: [[TMP137:%.*]] = bitcast i8** [[TMP136]] to i32** +// CHECK13-NEXT: store i32* [[VLA]], i32** [[TMP137]], align 8 +// CHECK13-NEXT: store i64 [[TMP118]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.16, i32 0, i32 3), align 8 +// CHECK13-NEXT: [[TMP138:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 3 +// CHECK13-NEXT: store i8* null, i8** [[TMP138]], align 8 +// CHECK13-NEXT: [[TMP139:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 0 +// CHECK13-NEXT: [[TMP140:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 0 +// CHECK13-NEXT: [[TMP141:%.*]] = load i32, i32* [[N]], align 4 +// CHECK13-NEXT: store i32 [[TMP141]], i32* [[DOTCAPTURE_EXPR_54]], align 4 +// CHECK13-NEXT: [[TMP142:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_54]], align 4 +// CHECK13-NEXT: [[SUB56:%.*]] = sub nsw i32 [[TMP142]], 0 +// CHECK13-NEXT: [[DIV57:%.*]] = sdiv i32 [[SUB56]], 1 +// CHECK13-NEXT: [[SUB58:%.*]] = sub nsw i32 [[DIV57]], 1 +// CHECK13-NEXT: store i32 [[SUB58]], i32* [[DOTCAPTURE_EXPR_55]], align 4 +// CHECK13-NEXT: [[TMP143:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_55]], align 4 +// CHECK13-NEXT: [[ADD59:%.*]] = add nsw i32 [[TMP143]], 1 +// CHECK13-NEXT: [[TMP144:%.*]] = zext i32 [[ADD59]] to i64 +// CHECK13-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP144]]) +// CHECK13-NEXT: [[TMP145:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l174.region_id, i32 4, i8** [[TMP139]], i8** [[TMP140]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.16, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK13-NEXT: [[TMP146:%.*]] = icmp ne i32 [[TMP145]], 0 +// CHECK13-NEXT: br i1 [[TMP146]], label [[OMP_OFFLOAD_FAILED60:%.*]], label [[OMP_OFFLOAD_CONT61:%.*]] +// CHECK13: omp_offload.failed60: +// CHECK13-NEXT: call void 
@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l174(i64 [[TMP115]], i64 [[TMP117]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] +// CHECK13-NEXT: br label [[OMP_OFFLOAD_CONT61]] +// CHECK13: omp_offload.cont61: +// CHECK13-NEXT: [[TMP147:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 +// CHECK13-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP147]]) // CHECK13-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK13-NEXT: [[TMP170:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK13-NEXT: call void @llvm.stackrestore(i8* [[TMP170]]) -// CHECK13-NEXT: [[TMP171:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK13-NEXT: ret i32 [[TMP171]] +// CHECK13-NEXT: [[TMP148:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK13-NEXT: call void @llvm.stackrestore(i8* [[TMP148]]) +// CHECK13-NEXT: [[TMP149:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK13-NEXT: ret i32 [[TMP149]] // // // CHECK13-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l154 @@ -9627,11 +9588,11 @@ // CHECK13-NEXT: [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK13-NEXT: store i32 [[TMP3]], i32* [[CONV2]], align 4 // CHECK13-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i64, i32*, i64)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i32* [[CONV1]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP4]]) +// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i64, i32*, i64)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32* [[CONV1]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP4]]) // CHECK13-NEXT: ret void // // -// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..5 +// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..6 // CHECK13-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK13-NEXT: entry: // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -9713,7 +9674,7 @@ // CHECK13-NEXT: [[CONV7:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK13-NEXT: store i32 [[TMP21]], i32* [[CONV7]], align 4, !llvm.access.group !28 // CHECK13-NEXT: [[TMP22:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !28 -// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64, i32*, i64)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], i64 [[TMP1]], i32* [[TMP2]], i64 [[TMP22]]), !llvm.access.group !28 +// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64, i32*, i64)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], i64 [[TMP1]], i32* [[TMP2]], i64 [[TMP22]]), !llvm.access.group !28 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK13: omp.inner.for.inc: // CHECK13-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !28 @@ -9767,7 +9728,7 @@ // CHECK13-NEXT: ret void // // -// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..6 +// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..7 // CHECK13-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK13-NEXT: entry: // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -9899,11 +9860,11 @@ // CHECK13-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32* // CHECK13-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8 // CHECK13-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8 -// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i64, i32*)* @.omp_outlined..8 to void (i32*, i32*, ...)*), i32* [[CONV]], i64 [[TMP0]], i32* [[TMP1]]) +// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i64, i32*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), i32* [[CONV]], i64 [[TMP0]], i32* [[TMP1]]) // CHECK13-NEXT: ret void // // -// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..8 +// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..10 // CHECK13-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] { // CHECK13-NEXT: entry: // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -9975,7 +9936,7 @@ // CHECK13-NEXT: [[TMP17:%.*]] = zext i32 [[TMP16]] to i64 // CHECK13-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !34 // CHECK13-NEXT: [[TMP19:%.*]] = zext i32 [[TMP18]] to i64 -// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64, i32*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), i64 [[TMP17]], i64 [[TMP19]], i32* [[TMP0]], i64 [[TMP1]], i32* [[TMP2]]), !llvm.access.group !34 +// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64, i32*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP17]], i64 [[TMP19]], i32* [[TMP0]], i64 [[TMP1]], i32* [[TMP2]]), !llvm.access.group !34 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK13: omp.inner.for.inc: // CHECK13-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !34 @@ -10006,7 +9967,7 @@ // CHECK13-NEXT: ret void // // -// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..9 +// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..11 // CHECK13-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] { // CHECK13-NEXT: entry: // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -10142,11 +10103,11 @@ // CHECK13-NEXT: [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK13-NEXT: store i32 [[TMP3]], i32* [[CONV2]], align 4 // CHECK13-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i64, i32*, i64)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32* [[CONV1]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP4]]) +// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i64, i32*, i64)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i32* [[CONV1]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP4]]) // CHECK13-NEXT: ret void // // -// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..14 // CHECK13-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK13-NEXT: entry: // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -10226,7 +10187,7 @@ // CHECK13-NEXT: [[CONV7:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK13-NEXT: store i32 [[TMP20]], i32* [[CONV7]], align 4, !llvm.access.group !40 // CHECK13-NEXT: [[TMP21:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !40 -// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64, i32*, i64)* @.omp_outlined..12 to void (i32*, i32*, ...)*), i64 [[TMP17]], i64 [[TMP19]], i32* [[TMP0]], i64 [[TMP1]], i32* [[TMP2]], i64 [[TMP21]]), !llvm.access.group !40 +// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64, i32*, i64)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i64 [[TMP17]], i64 [[TMP19]], i32* [[TMP0]], i64 [[TMP1]], i32* [[TMP2]], i64 [[TMP21]]), !llvm.access.group !40 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK13: omp.inner.for.inc: // CHECK13-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !40 @@ -10257,7 +10218,7 @@ // CHECK13-NEXT: ret void // // -// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..12 +// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..15 // CHECK13-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK13-NEXT: entry: // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -10415,7 +10376,7 @@ // CHECK13-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK13-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK13-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK13-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l122.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK13-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l122.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.20, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.21, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK13-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK13-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK13: omp_offload.failed: @@ -10433,7 +10394,7 @@ // CHECK13-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0 // CHECK13-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0 // CHECK13-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK13-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l127.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.19, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.20, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK13-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l127.region_id, i32 1, i8** [[TMP14]], i8** 
[[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.24, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.25, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK13-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 // CHECK13-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]] // CHECK13: omp_offload.failed5: @@ -10463,7 +10424,7 @@ // CHECK13-NEXT: [[TMP30:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0 // CHECK13-NEXT: [[TMP31:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0 // CHECK13-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK13-NEXT: [[TMP32:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l132.region_id, i32 2, i8** [[TMP30]], i8** [[TMP31]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.23, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.24, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK13-NEXT: [[TMP32:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l132.region_id, i32 2, i8** [[TMP30]], i8** [[TMP31]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.28, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.29, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK13-NEXT: [[TMP33:%.*]] = icmp ne i32 [[TMP32]], 0 // CHECK13-NEXT: br i1 [[TMP33]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]] // CHECK13: omp_offload.failed11: @@ -10481,7 +10442,7 @@ // CHECK13-NEXT: [[TMP39:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 0 // CHECK13-NEXT: [[TMP40:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 0 // CHECK13-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK13-NEXT: [[TMP41:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l137.region_id, i32 1, i8** [[TMP39]], i8** [[TMP40]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.27, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.28, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK13-NEXT: [[TMP41:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l137.region_id, i32 1, i8** [[TMP39]], i8** [[TMP40]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.32, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.33, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK13-NEXT: [[TMP42:%.*]] = icmp ne i32 [[TMP41]], 0 // CHECK13-NEXT: br i1 [[TMP42]], label [[OMP_OFFLOAD_FAILED17:%.*]], label [[OMP_OFFLOAD_CONT18:%.*]] // CHECK13: omp_offload.failed17: @@ -10511,7 +10472,7 @@ // CHECK13-NEXT: [[TMP55:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0 // CHECK13-NEXT: [[TMP56:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0 // 
CHECK13-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK13-NEXT: [[TMP57:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l142.region_id, i32 2, i8** [[TMP55]], i8** [[TMP56]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.31, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.32, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK13-NEXT: [[TMP57:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l142.region_id, i32 2, i8** [[TMP55]], i8** [[TMP56]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.36, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.37, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK13-NEXT: [[TMP58:%.*]] = icmp ne i32 [[TMP57]], 0 // CHECK13-NEXT: br i1 [[TMP58]], label [[OMP_OFFLOAD_FAILED25:%.*]], label [[OMP_OFFLOAD_CONT26:%.*]] // CHECK13: omp_offload.failed25: @@ -10527,11 +10488,11 @@ // CHECK13-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK13-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 // CHECK13-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 -// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK13-NEXT: ret void // // -// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..14 +// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..18 // CHECK13-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK13-NEXT: entry: // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -10579,7 +10540,7 @@ // CHECK13-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 // CHECK13-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !46 // CHECK13-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]), !llvm.access.group !46 +// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..19 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]), !llvm.access.group !46 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK13: omp.inner.for.inc: // CHECK13-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !46 @@ -10601,7 +10562,7 @@ // CHECK13-NEXT: ret void // // -// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..15 +// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..19 // CHECK13-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK13-NEXT: entry: // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -10691,11 +10652,11 @@ // CHECK13-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK13-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 // CHECK13-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 -// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..17 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..22 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK13-NEXT: ret void // // -// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..17 +// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..22 // CHECK13-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK13-NEXT: entry: // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -10743,7 +10704,7 @@ // CHECK13-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 // CHECK13-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !52 // CHECK13-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]), !llvm.access.group !52 +// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..23 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]), !llvm.access.group !52 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK13: omp.inner.for.inc: // CHECK13-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !52 @@ -10765,7 +10726,7 @@ // CHECK13-NEXT: ret void // // -// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..18 +// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..23 // CHECK13-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK13-NEXT: entry: // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -10866,11 +10827,11 @@ // CHECK13-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK13-NEXT: store i32 [[TMP2]], i32* [[CONV1]], align 4 // CHECK13-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..21 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP3]]) +// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..26 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP3]]) // CHECK13-NEXT: ret void // // -// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..21 +// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..26 // CHECK13-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK13-NEXT: entry: // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -10926,7 +10887,7 @@ // CHECK13-NEXT: [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK13-NEXT: store i32 [[TMP12]], i32* [[CONV2]], align 4, !llvm.access.group !58 // CHECK13-NEXT: [[TMP13:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !58 -// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..22 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]]), !llvm.access.group !58 +// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..27 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]]), !llvm.access.group !58 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK13: omp.inner.for.inc: // CHECK13-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !58 @@ -10948,7 +10909,7 @@ // CHECK13-NEXT: ret void // // -// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..22 +// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..27 // CHECK13-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK13-NEXT: entry: // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -11063,11 +11024,11 @@ // CHECK13-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK13-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 // CHECK13-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 -// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..25 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..30 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK13-NEXT: ret void // // -// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..25 +// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..30 // CHECK13-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK13-NEXT: entry: // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -11115,7 +11076,7 @@ // CHECK13-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 // CHECK13-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !64 // CHECK13-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..26 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]), !llvm.access.group !64 +// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..31 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]), !llvm.access.group !64 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK13: omp.inner.for.inc: // CHECK13-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !64 @@ -11137,7 +11098,7 @@ // CHECK13-NEXT: ret void // // -// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..26 +// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..31 // CHECK13-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK13-NEXT: entry: // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -11236,11 +11197,11 @@ // CHECK13-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK13-NEXT: store i32 [[TMP2]], i32* [[CONV1]], align 4 // CHECK13-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..29 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP3]]) +// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..34 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP3]]) // CHECK13-NEXT: ret void // // -// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..29 +// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..34 // CHECK13-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK13-NEXT: entry: // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -11296,7 +11257,7 @@ // CHECK13-NEXT: [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK13-NEXT: store i32 [[TMP12]], i32* [[CONV2]], align 4, !llvm.access.group !70 // CHECK13-NEXT: [[TMP13:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !70 -// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..30 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]]), !llvm.access.group !70 +// CHECK13-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..35 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]]), !llvm.access.group !70 // CHECK13-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK13: omp.inner.for.inc: // CHECK13-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !70 @@ -11318,7 +11279,7 @@ // CHECK13-NEXT: ret void // // -// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..30 +// CHECK13-LABEL: define {{[^@]+}}@.omp_outlined..35 // CHECK13-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK13-NEXT: entry: // CHECK13-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -11425,7 +11386,6 @@ // CHECK14-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK14-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK14-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8 -// CHECK14-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 8 // CHECK14-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK14-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK14-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -11433,36 +11393,32 @@ // CHECK14-NEXT: [[DOTOFFLOAD_BASEPTRS5:%.*]] = alloca [3 x i8*], align 8 // CHECK14-NEXT: [[DOTOFFLOAD_PTRS6:%.*]] = alloca [3 x i8*], align 8 // CHECK14-NEXT: [[DOTOFFLOAD_MAPPERS7:%.*]] = alloca [3 x i8*], align 8 -// CHECK14-NEXT: [[DOTOFFLOAD_SIZES8:%.*]] = alloca [3 x i64], align 8 -// CHECK14-NEXT: [[_TMP9:%.*]] = alloca i32, align 4 +// CHECK14-NEXT: [[_TMP8:%.*]] = alloca i32, align 4 +// CHECK14-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4 // CHECK14-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4 -// CHECK14-NEXT: [[DOTCAPTURE_EXPR_11:%.*]] = alloca i32, align 4 // CHECK14-NEXT: [[M_CASTED:%.*]] = alloca i64, align 8 -// CHECK14-NEXT: [[N_CASTED19:%.*]] = alloca i64, align 8 -// CHECK14-NEXT: [[DOTOFFLOAD_BASEPTRS21:%.*]] = alloca [4 x i8*], align 8 -// CHECK14-NEXT: [[DOTOFFLOAD_PTRS22:%.*]] = alloca [4 x i8*], align 8 -// CHECK14-NEXT: [[DOTOFFLOAD_MAPPERS23:%.*]] = alloca [4 x i8*], align 8 -// CHECK14-NEXT: [[DOTOFFLOAD_SIZES24:%.*]] = alloca [4 x i64], align 8 -// CHECK14-NEXT: [[_TMP25:%.*]] = alloca i32, align 4 -// CHECK14-NEXT: [[DOTCAPTURE_EXPR_26:%.*]] = alloca i32, align 4 -// CHECK14-NEXT: [[DOTCAPTURE_EXPR_27:%.*]] = alloca i32, align 4 -// CHECK14-NEXT: [[N_CASTED34:%.*]] = alloca i64, align 8 -// CHECK14-NEXT: [[DOTOFFLOAD_BASEPTRS36:%.*]] = alloca [3 x i8*], align 8 -// CHECK14-NEXT: [[DOTOFFLOAD_PTRS37:%.*]] = alloca [3 x i8*], align 8 -// CHECK14-NEXT: [[DOTOFFLOAD_MAPPERS38:%.*]] = alloca [3 x i8*], align 8 -// CHECK14-NEXT: [[DOTOFFLOAD_SIZES39:%.*]] = alloca [3 x i64], align 8 -// CHECK14-NEXT: [[_TMP40:%.*]] = alloca i32, align 4 -// CHECK14-NEXT: [[DOTCAPTURE_EXPR_41:%.*]] = alloca i32, align 4 -// CHECK14-NEXT: [[DOTCAPTURE_EXPR_42:%.*]] = alloca i32, align 4 -// CHECK14-NEXT: [[M_CASTED49:%.*]] = alloca i64, align 8 -// CHECK14-NEXT: [[N_CASTED51:%.*]] = alloca i64, align 8 -// CHECK14-NEXT: [[DOTOFFLOAD_BASEPTRS53:%.*]] = alloca [4 x i8*], align 8 -// CHECK14-NEXT: [[DOTOFFLOAD_PTRS54:%.*]] = alloca [4 x i8*], align 8 -// CHECK14-NEXT: 
[[DOTOFFLOAD_MAPPERS55:%.*]] = alloca [4 x i8*], align 8 -// CHECK14-NEXT: [[DOTOFFLOAD_SIZES56:%.*]] = alloca [4 x i64], align 8 -// CHECK14-NEXT: [[_TMP57:%.*]] = alloca i32, align 4 -// CHECK14-NEXT: [[DOTCAPTURE_EXPR_58:%.*]] = alloca i32, align 4 -// CHECK14-NEXT: [[DOTCAPTURE_EXPR_59:%.*]] = alloca i32, align 4 +// CHECK14-NEXT: [[N_CASTED18:%.*]] = alloca i64, align 8 +// CHECK14-NEXT: [[DOTOFFLOAD_BASEPTRS20:%.*]] = alloca [4 x i8*], align 8 +// CHECK14-NEXT: [[DOTOFFLOAD_PTRS21:%.*]] = alloca [4 x i8*], align 8 +// CHECK14-NEXT: [[DOTOFFLOAD_MAPPERS22:%.*]] = alloca [4 x i8*], align 8 +// CHECK14-NEXT: [[_TMP23:%.*]] = alloca i32, align 4 +// CHECK14-NEXT: [[DOTCAPTURE_EXPR_24:%.*]] = alloca i32, align 4 +// CHECK14-NEXT: [[DOTCAPTURE_EXPR_25:%.*]] = alloca i32, align 4 +// CHECK14-NEXT: [[N_CASTED32:%.*]] = alloca i64, align 8 +// CHECK14-NEXT: [[DOTOFFLOAD_BASEPTRS34:%.*]] = alloca [3 x i8*], align 8 +// CHECK14-NEXT: [[DOTOFFLOAD_PTRS35:%.*]] = alloca [3 x i8*], align 8 +// CHECK14-NEXT: [[DOTOFFLOAD_MAPPERS36:%.*]] = alloca [3 x i8*], align 8 +// CHECK14-NEXT: [[_TMP37:%.*]] = alloca i32, align 4 +// CHECK14-NEXT: [[DOTCAPTURE_EXPR_38:%.*]] = alloca i32, align 4 +// CHECK14-NEXT: [[DOTCAPTURE_EXPR_39:%.*]] = alloca i32, align 4 +// CHECK14-NEXT: [[M_CASTED46:%.*]] = alloca i64, align 8 +// CHECK14-NEXT: [[N_CASTED48:%.*]] = alloca i64, align 8 +// CHECK14-NEXT: [[DOTOFFLOAD_BASEPTRS50:%.*]] = alloca [4 x i8*], align 8 +// CHECK14-NEXT: [[DOTOFFLOAD_PTRS51:%.*]] = alloca [4 x i8*], align 8 +// CHECK14-NEXT: [[DOTOFFLOAD_MAPPERS52:%.*]] = alloca [4 x i8*], align 8 +// CHECK14-NEXT: [[_TMP53:%.*]] = alloca i32, align 4 +// CHECK14-NEXT: [[DOTCAPTURE_EXPR_54:%.*]] = alloca i32, align 4 +// CHECK14-NEXT: [[DOTCAPTURE_EXPR_55:%.*]] = alloca i32, align 4 // CHECK14-NEXT: store i32 0, i32* [[RETVAL]], align 4 // CHECK14-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 // CHECK14-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 8 @@ -11485,310 +11441,276 @@ // CHECK14-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK14-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i64* // CHECK14-NEXT: store i64 [[TMP4]], i64* [[TMP9]], align 8 -// CHECK14-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK14-NEXT: store i64 4, i64* [[TMP10]], align 8 -// CHECK14-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK14-NEXT: store i8* null, i8** [[TMP11]], align 8 -// CHECK14-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK14-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64* -// CHECK14-NEXT: store i64 [[TMP1]], i64* [[TMP13]], align 8 -// CHECK14-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK14-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64* -// CHECK14-NEXT: store i64 [[TMP1]], i64* [[TMP15]], align 8 -// CHECK14-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK14-NEXT: store i64 8, i64* [[TMP16]], align 8 -// CHECK14-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK14-NEXT: store i8* null, i8** [[TMP17]], align 8 -// CHECK14-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK14-NEXT: 
[[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK14-NEXT: store i8* null, i8** [[TMP10]], align 8 +// CHECK14-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK14-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i64* +// CHECK14-NEXT: store i64 [[TMP1]], i64* [[TMP12]], align 8 +// CHECK14-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK14-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i64* +// CHECK14-NEXT: store i64 [[TMP1]], i64* [[TMP14]], align 8 +// CHECK14-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK14-NEXT: store i8* null, i8** [[TMP15]], align 8 +// CHECK14-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK14-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** +// CHECK14-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 8 +// CHECK14-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK14-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** // CHECK14-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 8 -// CHECK14-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK14-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** -// CHECK14-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 8 -// CHECK14-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK14-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 8 -// CHECK14-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK14-NEXT: store i8* null, i8** [[TMP23]], align 8 -// CHECK14-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK14-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK14-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK14-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4 -// CHECK14-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK14-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK14-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 +// CHECK14-NEXT: store i64 [[TMP5]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 2), align 8 +// CHECK14-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK14-NEXT: store i8* null, i8** [[TMP20]], align 8 +// CHECK14-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK14-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK14-NEXT: [[TMP23:%.*]] = load i32, i32* [[N]], align 4 +// CHECK14-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK14-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK14-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK14-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK14-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK14-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK14-NEXT: [[TMP29:%.*]] = load i32, i32* 
[[DOTCAPTURE_EXPR_1]], align 4 -// CHECK14-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1 -// CHECK14-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 -// CHECK14-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP30]]) -// CHECK14-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l154.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK14-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 -// CHECK14-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK14-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK14-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], 1 +// CHECK14-NEXT: [[TMP26:%.*]] = zext i32 [[ADD]] to i64 +// CHECK14-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP26]]) +// CHECK14-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l154.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK14-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK14-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK14: omp_offload.failed: // CHECK14-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l154(i64 [[TMP4]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK14-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK14: omp_offload.cont: -// CHECK14-NEXT: [[TMP33:%.*]] = load i32, i32* [[N]], align 4 +// CHECK14-NEXT: [[TMP29:%.*]] = load i32, i32* [[N]], align 4 // CHECK14-NEXT: [[CONV4:%.*]] = bitcast i64* [[N_CASTED3]] to i32* -// CHECK14-NEXT: store i32 [[TMP33]], i32* [[CONV4]], align 4 -// CHECK14-NEXT: [[TMP34:%.*]] = load i64, i64* [[N_CASTED3]], align 8 -// CHECK14-NEXT: [[TMP35:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK14-NEXT: [[TMP36:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 -// CHECK14-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i64* -// CHECK14-NEXT: store i64 [[TMP34]], i64* [[TMP37]], align 8 -// CHECK14-NEXT: [[TMP38:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 -// CHECK14-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i64* -// CHECK14-NEXT: store i64 [[TMP34]], i64* [[TMP39]], align 8 -// CHECK14-NEXT: [[TMP40:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 0 -// CHECK14-NEXT: store i64 4, i64* [[TMP40]], align 8 -// CHECK14-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 0 +// CHECK14-NEXT: store i32 [[TMP29]], i32* [[CONV4]], align 4 +// CHECK14-NEXT: [[TMP30:%.*]] = load i64, i64* [[N_CASTED3]], align 8 +// CHECK14-NEXT: [[TMP31:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK14-NEXT: [[TMP32:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 +// CHECK14-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i64* +// CHECK14-NEXT: store i64 [[TMP30]], i64* [[TMP33]], align 8 +// CHECK14-NEXT: 
[[TMP34:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 +// CHECK14-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i64* +// CHECK14-NEXT: store i64 [[TMP30]], i64* [[TMP35]], align 8 +// CHECK14-NEXT: [[TMP36:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 0 +// CHECK14-NEXT: store i8* null, i8** [[TMP36]], align 8 +// CHECK14-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1 +// CHECK14-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i64* +// CHECK14-NEXT: store i64 [[TMP1]], i64* [[TMP38]], align 8 +// CHECK14-NEXT: [[TMP39:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 1 +// CHECK14-NEXT: [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i64* +// CHECK14-NEXT: store i64 [[TMP1]], i64* [[TMP40]], align 8 +// CHECK14-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 1 // CHECK14-NEXT: store i8* null, i8** [[TMP41]], align 8 -// CHECK14-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1 -// CHECK14-NEXT: [[TMP43:%.*]] = bitcast i8** [[TMP42]] to i64* -// CHECK14-NEXT: store i64 [[TMP1]], i64* [[TMP43]], align 8 -// CHECK14-NEXT: [[TMP44:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 1 -// CHECK14-NEXT: [[TMP45:%.*]] = bitcast i8** [[TMP44]] to i64* -// CHECK14-NEXT: store i64 [[TMP1]], i64* [[TMP45]], align 8 -// CHECK14-NEXT: [[TMP46:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 1 -// CHECK14-NEXT: store i64 8, i64* [[TMP46]], align 8 -// CHECK14-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 1 -// CHECK14-NEXT: store i8* null, i8** [[TMP47]], align 8 -// CHECK14-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 2 -// CHECK14-NEXT: [[TMP49:%.*]] = bitcast i8** [[TMP48]] to i32** -// CHECK14-NEXT: store i32* [[VLA]], i32** [[TMP49]], align 8 -// CHECK14-NEXT: [[TMP50:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 2 -// CHECK14-NEXT: [[TMP51:%.*]] = bitcast i8** [[TMP50]] to i32** -// CHECK14-NEXT: store i32* [[VLA]], i32** [[TMP51]], align 8 -// CHECK14-NEXT: [[TMP52:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 2 -// CHECK14-NEXT: store i64 [[TMP35]], i64* [[TMP52]], align 8 -// CHECK14-NEXT: [[TMP53:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 2 -// CHECK14-NEXT: store i8* null, i8** [[TMP53]], align 8 -// CHECK14-NEXT: [[TMP54:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 -// CHECK14-NEXT: [[TMP55:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 -// CHECK14-NEXT: [[TMP56:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 0 +// CHECK14-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 2 +// CHECK14-NEXT: [[TMP43:%.*]] = bitcast i8** [[TMP42]] to i32** +// CHECK14-NEXT: store i32* [[VLA]], i32** [[TMP43]], align 8 +// CHECK14-NEXT: [[TMP44:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 2 +// CHECK14-NEXT: [[TMP45:%.*]] = bitcast i8** [[TMP44]] to i32** +// CHECK14-NEXT: store i32* [[VLA]], i32** 
[[TMP45]], align 8 +// CHECK14-NEXT: store i64 [[TMP31]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 2), align 8 +// CHECK14-NEXT: [[TMP46:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 2 +// CHECK14-NEXT: store i8* null, i8** [[TMP46]], align 8 +// CHECK14-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 +// CHECK14-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 +// CHECK14-NEXT: [[TMP49:%.*]] = load i32, i32* [[N]], align 4 +// CHECK14-NEXT: store i32 [[TMP49]], i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK14-NEXT: [[TMP50:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK14-NEXT: [[SUB11:%.*]] = sub nsw i32 [[TMP50]], 0 +// CHECK14-NEXT: [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1 +// CHECK14-NEXT: [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1 +// CHECK14-NEXT: store i32 [[SUB13]], i32* [[DOTCAPTURE_EXPR_10]], align 4 +// CHECK14-NEXT: [[TMP51:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 +// CHECK14-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP51]], 1 +// CHECK14-NEXT: [[TMP52:%.*]] = zext i32 [[ADD14]] to i64 +// CHECK14-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP52]]) +// CHECK14-NEXT: [[TMP53:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159.region_id, i32 3, i8** [[TMP47]], i8** [[TMP48]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK14-NEXT: [[TMP54:%.*]] = icmp ne i32 [[TMP53]], 0 +// CHECK14-NEXT: br i1 [[TMP54]], label [[OMP_OFFLOAD_FAILED15:%.*]], label [[OMP_OFFLOAD_CONT16:%.*]] +// CHECK14: omp_offload.failed15: +// CHECK14-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159(i64 [[TMP30]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] +// CHECK14-NEXT: br label [[OMP_OFFLOAD_CONT16]] +// CHECK14: omp_offload.cont16: +// CHECK14-NEXT: [[TMP55:%.*]] = load i32, i32* [[M]], align 4 +// CHECK14-NEXT: [[CONV17:%.*]] = bitcast i64* [[M_CASTED]] to i32* +// CHECK14-NEXT: store i32 [[TMP55]], i32* [[CONV17]], align 4 +// CHECK14-NEXT: [[TMP56:%.*]] = load i64, i64* [[M_CASTED]], align 8 // CHECK14-NEXT: [[TMP57:%.*]] = load i32, i32* [[N]], align 4 -// CHECK14-NEXT: store i32 [[TMP57]], i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK14-NEXT: [[TMP58:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK14-NEXT: [[SUB12:%.*]] = sub nsw i32 [[TMP58]], 0 -// CHECK14-NEXT: [[DIV13:%.*]] = sdiv i32 [[SUB12]], 1 -// CHECK14-NEXT: [[SUB14:%.*]] = sub nsw i32 [[DIV13]], 1 -// CHECK14-NEXT: store i32 [[SUB14]], i32* [[DOTCAPTURE_EXPR_11]], align 4 -// CHECK14-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_11]], align 4 -// CHECK14-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP59]], 1 -// CHECK14-NEXT: [[TMP60:%.*]] = zext i32 [[ADD15]] to i64 -// CHECK14-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP60]]) -// CHECK14-NEXT: [[TMP61:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159.region_id, i32 3, i8** [[TMP54]], i8** [[TMP55]], i64* [[TMP56]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), 
i8** null, i8** null, i32 0, i32 0) -// CHECK14-NEXT: [[TMP62:%.*]] = icmp ne i32 [[TMP61]], 0 -// CHECK14-NEXT: br i1 [[TMP62]], label [[OMP_OFFLOAD_FAILED16:%.*]], label [[OMP_OFFLOAD_CONT17:%.*]] -// CHECK14: omp_offload.failed16: -// CHECK14-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159(i64 [[TMP34]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] -// CHECK14-NEXT: br label [[OMP_OFFLOAD_CONT17]] -// CHECK14: omp_offload.cont17: -// CHECK14-NEXT: [[TMP63:%.*]] = load i32, i32* [[M]], align 4 -// CHECK14-NEXT: [[CONV18:%.*]] = bitcast i64* [[M_CASTED]] to i32* -// CHECK14-NEXT: store i32 [[TMP63]], i32* [[CONV18]], align 4 -// CHECK14-NEXT: [[TMP64:%.*]] = load i64, i64* [[M_CASTED]], align 8 -// CHECK14-NEXT: [[TMP65:%.*]] = load i32, i32* [[N]], align 4 -// CHECK14-NEXT: [[CONV20:%.*]] = bitcast i64* [[N_CASTED19]] to i32* -// CHECK14-NEXT: store i32 [[TMP65]], i32* [[CONV20]], align 4 -// CHECK14-NEXT: [[TMP66:%.*]] = load i64, i64* [[N_CASTED19]], align 8 -// CHECK14-NEXT: [[TMP67:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK14-NEXT: [[TMP68:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0 -// CHECK14-NEXT: [[TMP69:%.*]] = bitcast i8** [[TMP68]] to i64* -// CHECK14-NEXT: store i64 [[TMP64]], i64* [[TMP69]], align 8 -// CHECK14-NEXT: [[TMP70:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0 +// CHECK14-NEXT: [[CONV19:%.*]] = bitcast i64* [[N_CASTED18]] to i32* +// CHECK14-NEXT: store i32 [[TMP57]], i32* [[CONV19]], align 4 +// CHECK14-NEXT: [[TMP58:%.*]] = load i64, i64* [[N_CASTED18]], align 8 +// CHECK14-NEXT: [[TMP59:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK14-NEXT: [[TMP60:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0 +// CHECK14-NEXT: [[TMP61:%.*]] = bitcast i8** [[TMP60]] to i64* +// CHECK14-NEXT: store i64 [[TMP56]], i64* [[TMP61]], align 8 +// CHECK14-NEXT: [[TMP62:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0 +// CHECK14-NEXT: [[TMP63:%.*]] = bitcast i8** [[TMP62]] to i64* +// CHECK14-NEXT: store i64 [[TMP56]], i64* [[TMP63]], align 8 +// CHECK14-NEXT: [[TMP64:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 0 +// CHECK14-NEXT: store i8* null, i8** [[TMP64]], align 8 +// CHECK14-NEXT: [[TMP65:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 1 +// CHECK14-NEXT: [[TMP66:%.*]] = bitcast i8** [[TMP65]] to i64* +// CHECK14-NEXT: store i64 [[TMP58]], i64* [[TMP66]], align 8 +// CHECK14-NEXT: [[TMP67:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 1 +// CHECK14-NEXT: [[TMP68:%.*]] = bitcast i8** [[TMP67]] to i64* +// CHECK14-NEXT: store i64 [[TMP58]], i64* [[TMP68]], align 8 +// CHECK14-NEXT: [[TMP69:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 1 +// CHECK14-NEXT: store i8* null, i8** [[TMP69]], align 8 +// CHECK14-NEXT: [[TMP70:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 2 // CHECK14-NEXT: [[TMP71:%.*]] = bitcast i8** [[TMP70]] to i64* -// CHECK14-NEXT: store i64 [[TMP64]], i64* [[TMP71]], align 8 -// CHECK14-NEXT: [[TMP72:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES24]], i32 0, i32 0 -// CHECK14-NEXT: store i64 4, i64* [[TMP72]], align 8 -// CHECK14-NEXT: [[TMP73:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 0 -// 
CHECK14-NEXT: store i8* null, i8** [[TMP73]], align 8 -// CHECK14-NEXT: [[TMP74:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 1 -// CHECK14-NEXT: [[TMP75:%.*]] = bitcast i8** [[TMP74]] to i64* -// CHECK14-NEXT: store i64 [[TMP66]], i64* [[TMP75]], align 8 -// CHECK14-NEXT: [[TMP76:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 1 -// CHECK14-NEXT: [[TMP77:%.*]] = bitcast i8** [[TMP76]] to i64* -// CHECK14-NEXT: store i64 [[TMP66]], i64* [[TMP77]], align 8 -// CHECK14-NEXT: [[TMP78:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES24]], i32 0, i32 1 -// CHECK14-NEXT: store i64 4, i64* [[TMP78]], align 8 -// CHECK14-NEXT: [[TMP79:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 1 +// CHECK14-NEXT: store i64 [[TMP1]], i64* [[TMP71]], align 8 +// CHECK14-NEXT: [[TMP72:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 2 +// CHECK14-NEXT: [[TMP73:%.*]] = bitcast i8** [[TMP72]] to i64* +// CHECK14-NEXT: store i64 [[TMP1]], i64* [[TMP73]], align 8 +// CHECK14-NEXT: [[TMP74:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 2 +// CHECK14-NEXT: store i8* null, i8** [[TMP74]], align 8 +// CHECK14-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 3 +// CHECK14-NEXT: [[TMP76:%.*]] = bitcast i8** [[TMP75]] to i32** +// CHECK14-NEXT: store i32* [[VLA]], i32** [[TMP76]], align 8 +// CHECK14-NEXT: [[TMP77:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 3 +// CHECK14-NEXT: [[TMP78:%.*]] = bitcast i8** [[TMP77]] to i32** +// CHECK14-NEXT: store i32* [[VLA]], i32** [[TMP78]], align 8 +// CHECK14-NEXT: store i64 [[TMP59]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 3), align 8 +// CHECK14-NEXT: [[TMP79:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 3 // CHECK14-NEXT: store i8* null, i8** [[TMP79]], align 8 -// CHECK14-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 2 -// CHECK14-NEXT: [[TMP81:%.*]] = bitcast i8** [[TMP80]] to i64* -// CHECK14-NEXT: store i64 [[TMP1]], i64* [[TMP81]], align 8 -// CHECK14-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 2 -// CHECK14-NEXT: [[TMP83:%.*]] = bitcast i8** [[TMP82]] to i64* -// CHECK14-NEXT: store i64 [[TMP1]], i64* [[TMP83]], align 8 -// CHECK14-NEXT: [[TMP84:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES24]], i32 0, i32 2 -// CHECK14-NEXT: store i64 8, i64* [[TMP84]], align 8 -// CHECK14-NEXT: [[TMP85:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 2 -// CHECK14-NEXT: store i8* null, i8** [[TMP85]], align 8 -// CHECK14-NEXT: [[TMP86:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 3 -// CHECK14-NEXT: [[TMP87:%.*]] = bitcast i8** [[TMP86]] to i32** -// CHECK14-NEXT: store i32* [[VLA]], i32** [[TMP87]], align 8 -// CHECK14-NEXT: [[TMP88:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 3 -// CHECK14-NEXT: [[TMP89:%.*]] = bitcast i8** [[TMP88]] to i32** -// CHECK14-NEXT: store i32* [[VLA]], i32** [[TMP89]], align 8 -// CHECK14-NEXT: [[TMP90:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES24]], i32 0, i32 3 
-// CHECK14-NEXT: store i64 [[TMP67]], i64* [[TMP90]], align 8 -// CHECK14-NEXT: [[TMP91:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 3 -// CHECK14-NEXT: store i8* null, i8** [[TMP91]], align 8 -// CHECK14-NEXT: [[TMP92:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0 -// CHECK14-NEXT: [[TMP93:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0 -// CHECK14-NEXT: [[TMP94:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES24]], i32 0, i32 0 -// CHECK14-NEXT: [[TMP95:%.*]] = load i32, i32* [[N]], align 4 -// CHECK14-NEXT: store i32 [[TMP95]], i32* [[DOTCAPTURE_EXPR_26]], align 4 -// CHECK14-NEXT: [[TMP96:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_26]], align 4 -// CHECK14-NEXT: [[SUB28:%.*]] = sub nsw i32 [[TMP96]], 0 -// CHECK14-NEXT: [[DIV29:%.*]] = sdiv i32 [[SUB28]], 1 -// CHECK14-NEXT: [[SUB30:%.*]] = sub nsw i32 [[DIV29]], 1 -// CHECK14-NEXT: store i32 [[SUB30]], i32* [[DOTCAPTURE_EXPR_27]], align 4 -// CHECK14-NEXT: [[TMP97:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_27]], align 4 -// CHECK14-NEXT: [[ADD31:%.*]] = add nsw i32 [[TMP97]], 1 -// CHECK14-NEXT: [[TMP98:%.*]] = zext i32 [[ADD31]] to i64 -// CHECK14-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP98]]) -// CHECK14-NEXT: [[TMP99:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l164.region_id, i32 4, i8** [[TMP92]], i8** [[TMP93]], i64* [[TMP94]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.7, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK14-NEXT: [[TMP100:%.*]] = icmp ne i32 [[TMP99]], 0 -// CHECK14-NEXT: br i1 [[TMP100]], label [[OMP_OFFLOAD_FAILED32:%.*]], label [[OMP_OFFLOAD_CONT33:%.*]] -// CHECK14: omp_offload.failed32: -// CHECK14-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l164(i64 [[TMP64]], i64 [[TMP66]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] -// CHECK14-NEXT: br label [[OMP_OFFLOAD_CONT33]] -// CHECK14: omp_offload.cont33: -// CHECK14-NEXT: [[TMP101:%.*]] = load i32, i32* [[N]], align 4 -// CHECK14-NEXT: [[CONV35:%.*]] = bitcast i64* [[N_CASTED34]] to i32* -// CHECK14-NEXT: store i32 [[TMP101]], i32* [[CONV35]], align 4 -// CHECK14-NEXT: [[TMP102:%.*]] = load i64, i64* [[N_CASTED34]], align 8 -// CHECK14-NEXT: [[TMP103:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK14-NEXT: [[TMP104:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS36]], i32 0, i32 0 -// CHECK14-NEXT: [[TMP105:%.*]] = bitcast i8** [[TMP104]] to i64* -// CHECK14-NEXT: store i64 [[TMP102]], i64* [[TMP105]], align 8 -// CHECK14-NEXT: [[TMP106:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS37]], i32 0, i32 0 -// CHECK14-NEXT: [[TMP107:%.*]] = bitcast i8** [[TMP106]] to i64* -// CHECK14-NEXT: store i64 [[TMP102]], i64* [[TMP107]], align 8 -// CHECK14-NEXT: [[TMP108:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES39]], i32 0, i32 0 -// CHECK14-NEXT: store i64 4, i64* [[TMP108]], align 8 -// CHECK14-NEXT: [[TMP109:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS38]], i64 0, i64 0 -// CHECK14-NEXT: store i8* null, i8** [[TMP109]], align 8 -// CHECK14-NEXT: [[TMP110:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS36]], i32 0, i32 1 -// CHECK14-NEXT: [[TMP111:%.*]] = bitcast i8** [[TMP110]] to i64* -// CHECK14-NEXT: 
store i64 [[TMP1]], i64* [[TMP111]], align 8 -// CHECK14-NEXT: [[TMP112:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS37]], i32 0, i32 1 -// CHECK14-NEXT: [[TMP113:%.*]] = bitcast i8** [[TMP112]] to i64* -// CHECK14-NEXT: store i64 [[TMP1]], i64* [[TMP113]], align 8 -// CHECK14-NEXT: [[TMP114:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES39]], i32 0, i32 1 -// CHECK14-NEXT: store i64 8, i64* [[TMP114]], align 8 -// CHECK14-NEXT: [[TMP115:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS38]], i64 0, i64 1 -// CHECK14-NEXT: store i8* null, i8** [[TMP115]], align 8 -// CHECK14-NEXT: [[TMP116:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS36]], i32 0, i32 2 -// CHECK14-NEXT: [[TMP117:%.*]] = bitcast i8** [[TMP116]] to i32** -// CHECK14-NEXT: store i32* [[VLA]], i32** [[TMP117]], align 8 -// CHECK14-NEXT: [[TMP118:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS37]], i32 0, i32 2 -// CHECK14-NEXT: [[TMP119:%.*]] = bitcast i8** [[TMP118]] to i32** -// CHECK14-NEXT: store i32* [[VLA]], i32** [[TMP119]], align 8 -// CHECK14-NEXT: [[TMP120:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES39]], i32 0, i32 2 -// CHECK14-NEXT: store i64 [[TMP103]], i64* [[TMP120]], align 8 -// CHECK14-NEXT: [[TMP121:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS38]], i64 0, i64 2 -// CHECK14-NEXT: store i8* null, i8** [[TMP121]], align 8 -// CHECK14-NEXT: [[TMP122:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS36]], i32 0, i32 0 -// CHECK14-NEXT: [[TMP123:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS37]], i32 0, i32 0 -// CHECK14-NEXT: [[TMP124:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES39]], i32 0, i32 0 -// CHECK14-NEXT: [[TMP125:%.*]] = load i32, i32* [[N]], align 4 -// CHECK14-NEXT: store i32 [[TMP125]], i32* [[DOTCAPTURE_EXPR_41]], align 4 -// CHECK14-NEXT: [[TMP126:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_41]], align 4 -// CHECK14-NEXT: [[SUB43:%.*]] = sub nsw i32 [[TMP126]], 0 -// CHECK14-NEXT: [[DIV44:%.*]] = sdiv i32 [[SUB43]], 1 -// CHECK14-NEXT: [[SUB45:%.*]] = sub nsw i32 [[DIV44]], 1 -// CHECK14-NEXT: store i32 [[SUB45]], i32* [[DOTCAPTURE_EXPR_42]], align 4 -// CHECK14-NEXT: [[TMP127:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_42]], align 4 -// CHECK14-NEXT: [[ADD46:%.*]] = add nsw i32 [[TMP127]], 1 -// CHECK14-NEXT: [[TMP128:%.*]] = zext i32 [[ADD46]] to i64 -// CHECK14-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP128]]) -// CHECK14-NEXT: [[TMP129:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l169.region_id, i32 3, i8** [[TMP122]], i8** [[TMP123]], i64* [[TMP124]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK14-NEXT: [[TMP130:%.*]] = icmp ne i32 [[TMP129]], 0 -// CHECK14-NEXT: br i1 [[TMP130]], label [[OMP_OFFLOAD_FAILED47:%.*]], label [[OMP_OFFLOAD_CONT48:%.*]] -// CHECK14: omp_offload.failed47: -// CHECK14-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l169(i64 [[TMP102]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] -// CHECK14-NEXT: br label [[OMP_OFFLOAD_CONT48]] -// CHECK14: omp_offload.cont48: -// CHECK14-NEXT: [[TMP131:%.*]] = load i32, i32* [[M]], align 4 -// CHECK14-NEXT: [[CONV50:%.*]] = bitcast i64* 
[[M_CASTED49]] to i32* -// CHECK14-NEXT: store i32 [[TMP131]], i32* [[CONV50]], align 4 -// CHECK14-NEXT: [[TMP132:%.*]] = load i64, i64* [[M_CASTED49]], align 8 -// CHECK14-NEXT: [[TMP133:%.*]] = load i32, i32* [[N]], align 4 -// CHECK14-NEXT: [[CONV52:%.*]] = bitcast i64* [[N_CASTED51]] to i32* -// CHECK14-NEXT: store i32 [[TMP133]], i32* [[CONV52]], align 4 -// CHECK14-NEXT: [[TMP134:%.*]] = load i64, i64* [[N_CASTED51]], align 8 -// CHECK14-NEXT: [[TMP135:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK14-NEXT: [[TMP136:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS53]], i32 0, i32 0 -// CHECK14-NEXT: [[TMP137:%.*]] = bitcast i8** [[TMP136]] to i64* -// CHECK14-NEXT: store i64 [[TMP132]], i64* [[TMP137]], align 8 -// CHECK14-NEXT: [[TMP138:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS54]], i32 0, i32 0 -// CHECK14-NEXT: [[TMP139:%.*]] = bitcast i8** [[TMP138]] to i64* -// CHECK14-NEXT: store i64 [[TMP132]], i64* [[TMP139]], align 8 -// CHECK14-NEXT: [[TMP140:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES56]], i32 0, i32 0 -// CHECK14-NEXT: store i64 4, i64* [[TMP140]], align 8 -// CHECK14-NEXT: [[TMP141:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS55]], i64 0, i64 0 -// CHECK14-NEXT: store i8* null, i8** [[TMP141]], align 8 -// CHECK14-NEXT: [[TMP142:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS53]], i32 0, i32 1 -// CHECK14-NEXT: [[TMP143:%.*]] = bitcast i8** [[TMP142]] to i64* -// CHECK14-NEXT: store i64 [[TMP134]], i64* [[TMP143]], align 8 -// CHECK14-NEXT: [[TMP144:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS54]], i32 0, i32 1 -// CHECK14-NEXT: [[TMP145:%.*]] = bitcast i8** [[TMP144]] to i64* -// CHECK14-NEXT: store i64 [[TMP134]], i64* [[TMP145]], align 8 -// CHECK14-NEXT: [[TMP146:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES56]], i32 0, i32 1 -// CHECK14-NEXT: store i64 4, i64* [[TMP146]], align 8 -// CHECK14-NEXT: [[TMP147:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS55]], i64 0, i64 1 -// CHECK14-NEXT: store i8* null, i8** [[TMP147]], align 8 -// CHECK14-NEXT: [[TMP148:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS53]], i32 0, i32 2 -// CHECK14-NEXT: [[TMP149:%.*]] = bitcast i8** [[TMP148]] to i64* -// CHECK14-NEXT: store i64 [[TMP1]], i64* [[TMP149]], align 8 -// CHECK14-NEXT: [[TMP150:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS54]], i32 0, i32 2 -// CHECK14-NEXT: [[TMP151:%.*]] = bitcast i8** [[TMP150]] to i64* -// CHECK14-NEXT: store i64 [[TMP1]], i64* [[TMP151]], align 8 -// CHECK14-NEXT: [[TMP152:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES56]], i32 0, i32 2 -// CHECK14-NEXT: store i64 8, i64* [[TMP152]], align 8 -// CHECK14-NEXT: [[TMP153:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS55]], i64 0, i64 2 -// CHECK14-NEXT: store i8* null, i8** [[TMP153]], align 8 -// CHECK14-NEXT: [[TMP154:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS53]], i32 0, i32 3 -// CHECK14-NEXT: [[TMP155:%.*]] = bitcast i8** [[TMP154]] to i32** -// CHECK14-NEXT: store i32* [[VLA]], i32** [[TMP155]], align 8 -// CHECK14-NEXT: [[TMP156:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS54]], i32 0, i32 3 -// CHECK14-NEXT: [[TMP157:%.*]] = bitcast i8** [[TMP156]] to i32** -// CHECK14-NEXT: store i32* [[VLA]], i32** [[TMP157]], align 8 -// 
CHECK14-NEXT: [[TMP158:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES56]], i32 0, i32 3 -// CHECK14-NEXT: store i64 [[TMP135]], i64* [[TMP158]], align 8 -// CHECK14-NEXT: [[TMP159:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS55]], i64 0, i64 3 -// CHECK14-NEXT: store i8* null, i8** [[TMP159]], align 8 -// CHECK14-NEXT: [[TMP160:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS53]], i32 0, i32 0 -// CHECK14-NEXT: [[TMP161:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS54]], i32 0, i32 0 -// CHECK14-NEXT: [[TMP162:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES56]], i32 0, i32 0 -// CHECK14-NEXT: [[TMP163:%.*]] = load i32, i32* [[N]], align 4 -// CHECK14-NEXT: store i32 [[TMP163]], i32* [[DOTCAPTURE_EXPR_58]], align 4 -// CHECK14-NEXT: [[TMP164:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_58]], align 4 -// CHECK14-NEXT: [[SUB60:%.*]] = sub nsw i32 [[TMP164]], 0 -// CHECK14-NEXT: [[DIV61:%.*]] = sdiv i32 [[SUB60]], 1 -// CHECK14-NEXT: [[SUB62:%.*]] = sub nsw i32 [[DIV61]], 1 -// CHECK14-NEXT: store i32 [[SUB62]], i32* [[DOTCAPTURE_EXPR_59]], align 4 -// CHECK14-NEXT: [[TMP165:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_59]], align 4 -// CHECK14-NEXT: [[ADD63:%.*]] = add nsw i32 [[TMP165]], 1 -// CHECK14-NEXT: [[TMP166:%.*]] = zext i32 [[ADD63]] to i64 -// CHECK14-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP166]]) -// CHECK14-NEXT: [[TMP167:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l174.region_id, i32 4, i8** [[TMP160]], i8** [[TMP161]], i64* [[TMP162]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK14-NEXT: [[TMP168:%.*]] = icmp ne i32 [[TMP167]], 0 -// CHECK14-NEXT: br i1 [[TMP168]], label [[OMP_OFFLOAD_FAILED64:%.*]], label [[OMP_OFFLOAD_CONT65:%.*]] -// CHECK14: omp_offload.failed64: -// CHECK14-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l174(i64 [[TMP132]], i64 [[TMP134]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] -// CHECK14-NEXT: br label [[OMP_OFFLOAD_CONT65]] -// CHECK14: omp_offload.cont65: -// CHECK14-NEXT: [[TMP169:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 -// CHECK14-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP169]]) +// CHECK14-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0 +// CHECK14-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0 +// CHECK14-NEXT: [[TMP82:%.*]] = load i32, i32* [[N]], align 4 +// CHECK14-NEXT: store i32 [[TMP82]], i32* [[DOTCAPTURE_EXPR_24]], align 4 +// CHECK14-NEXT: [[TMP83:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_24]], align 4 +// CHECK14-NEXT: [[SUB26:%.*]] = sub nsw i32 [[TMP83]], 0 +// CHECK14-NEXT: [[DIV27:%.*]] = sdiv i32 [[SUB26]], 1 +// CHECK14-NEXT: [[SUB28:%.*]] = sub nsw i32 [[DIV27]], 1 +// CHECK14-NEXT: store i32 [[SUB28]], i32* [[DOTCAPTURE_EXPR_25]], align 4 +// CHECK14-NEXT: [[TMP84:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_25]], align 4 +// CHECK14-NEXT: [[ADD29:%.*]] = add nsw i32 [[TMP84]], 1 +// CHECK14-NEXT: [[TMP85:%.*]] = zext i32 [[ADD29]] to i64 +// CHECK14-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP85]]) +// CHECK14-NEXT: [[TMP86:%.*]] = call i32 
@__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l164.region_id, i32 4, i8** [[TMP80]], i8** [[TMP81]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK14-NEXT: [[TMP87:%.*]] = icmp ne i32 [[TMP86]], 0 +// CHECK14-NEXT: br i1 [[TMP87]], label [[OMP_OFFLOAD_FAILED30:%.*]], label [[OMP_OFFLOAD_CONT31:%.*]] +// CHECK14: omp_offload.failed30: +// CHECK14-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l164(i64 [[TMP56]], i64 [[TMP58]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] +// CHECK14-NEXT: br label [[OMP_OFFLOAD_CONT31]] +// CHECK14: omp_offload.cont31: +// CHECK14-NEXT: [[TMP88:%.*]] = load i32, i32* [[N]], align 4 +// CHECK14-NEXT: [[CONV33:%.*]] = bitcast i64* [[N_CASTED32]] to i32* +// CHECK14-NEXT: store i32 [[TMP88]], i32* [[CONV33]], align 4 +// CHECK14-NEXT: [[TMP89:%.*]] = load i64, i64* [[N_CASTED32]], align 8 +// CHECK14-NEXT: [[TMP90:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK14-NEXT: [[TMP91:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 0 +// CHECK14-NEXT: [[TMP92:%.*]] = bitcast i8** [[TMP91]] to i64* +// CHECK14-NEXT: store i64 [[TMP89]], i64* [[TMP92]], align 8 +// CHECK14-NEXT: [[TMP93:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS35]], i32 0, i32 0 +// CHECK14-NEXT: [[TMP94:%.*]] = bitcast i8** [[TMP93]] to i64* +// CHECK14-NEXT: store i64 [[TMP89]], i64* [[TMP94]], align 8 +// CHECK14-NEXT: [[TMP95:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS36]], i64 0, i64 0 +// CHECK14-NEXT: store i8* null, i8** [[TMP95]], align 8 +// CHECK14-NEXT: [[TMP96:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 1 +// CHECK14-NEXT: [[TMP97:%.*]] = bitcast i8** [[TMP96]] to i64* +// CHECK14-NEXT: store i64 [[TMP1]], i64* [[TMP97]], align 8 +// CHECK14-NEXT: [[TMP98:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS35]], i32 0, i32 1 +// CHECK14-NEXT: [[TMP99:%.*]] = bitcast i8** [[TMP98]] to i64* +// CHECK14-NEXT: store i64 [[TMP1]], i64* [[TMP99]], align 8 +// CHECK14-NEXT: [[TMP100:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS36]], i64 0, i64 1 +// CHECK14-NEXT: store i8* null, i8** [[TMP100]], align 8 +// CHECK14-NEXT: [[TMP101:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 2 +// CHECK14-NEXT: [[TMP102:%.*]] = bitcast i8** [[TMP101]] to i32** +// CHECK14-NEXT: store i32* [[VLA]], i32** [[TMP102]], align 8 +// CHECK14-NEXT: [[TMP103:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS35]], i32 0, i32 2 +// CHECK14-NEXT: [[TMP104:%.*]] = bitcast i8** [[TMP103]] to i32** +// CHECK14-NEXT: store i32* [[VLA]], i32** [[TMP104]], align 8 +// CHECK14-NEXT: store i64 [[TMP90]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.12, i32 0, i32 2), align 8 +// CHECK14-NEXT: [[TMP105:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS36]], i64 0, i64 2 +// CHECK14-NEXT: store i8* null, i8** [[TMP105]], align 8 +// CHECK14-NEXT: [[TMP106:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 0 +// CHECK14-NEXT: [[TMP107:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS35]], i32 0, i32 0 +// CHECK14-NEXT: [[TMP108:%.*]] = 
load i32, i32* [[N]], align 4 +// CHECK14-NEXT: store i32 [[TMP108]], i32* [[DOTCAPTURE_EXPR_38]], align 4 +// CHECK14-NEXT: [[TMP109:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_38]], align 4 +// CHECK14-NEXT: [[SUB40:%.*]] = sub nsw i32 [[TMP109]], 0 +// CHECK14-NEXT: [[DIV41:%.*]] = sdiv i32 [[SUB40]], 1 +// CHECK14-NEXT: [[SUB42:%.*]] = sub nsw i32 [[DIV41]], 1 +// CHECK14-NEXT: store i32 [[SUB42]], i32* [[DOTCAPTURE_EXPR_39]], align 4 +// CHECK14-NEXT: [[TMP110:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_39]], align 4 +// CHECK14-NEXT: [[ADD43:%.*]] = add nsw i32 [[TMP110]], 1 +// CHECK14-NEXT: [[TMP111:%.*]] = zext i32 [[ADD43]] to i64 +// CHECK14-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP111]]) +// CHECK14-NEXT: [[TMP112:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l169.region_id, i32 3, i8** [[TMP106]], i8** [[TMP107]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK14-NEXT: [[TMP113:%.*]] = icmp ne i32 [[TMP112]], 0 +// CHECK14-NEXT: br i1 [[TMP113]], label [[OMP_OFFLOAD_FAILED44:%.*]], label [[OMP_OFFLOAD_CONT45:%.*]] +// CHECK14: omp_offload.failed44: +// CHECK14-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l169(i64 [[TMP89]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] +// CHECK14-NEXT: br label [[OMP_OFFLOAD_CONT45]] +// CHECK14: omp_offload.cont45: +// CHECK14-NEXT: [[TMP114:%.*]] = load i32, i32* [[M]], align 4 +// CHECK14-NEXT: [[CONV47:%.*]] = bitcast i64* [[M_CASTED46]] to i32* +// CHECK14-NEXT: store i32 [[TMP114]], i32* [[CONV47]], align 4 +// CHECK14-NEXT: [[TMP115:%.*]] = load i64, i64* [[M_CASTED46]], align 8 +// CHECK14-NEXT: [[TMP116:%.*]] = load i32, i32* [[N]], align 4 +// CHECK14-NEXT: [[CONV49:%.*]] = bitcast i64* [[N_CASTED48]] to i32* +// CHECK14-NEXT: store i32 [[TMP116]], i32* [[CONV49]], align 4 +// CHECK14-NEXT: [[TMP117:%.*]] = load i64, i64* [[N_CASTED48]], align 8 +// CHECK14-NEXT: [[TMP118:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK14-NEXT: [[TMP119:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 0 +// CHECK14-NEXT: [[TMP120:%.*]] = bitcast i8** [[TMP119]] to i64* +// CHECK14-NEXT: store i64 [[TMP115]], i64* [[TMP120]], align 8 +// CHECK14-NEXT: [[TMP121:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 0 +// CHECK14-NEXT: [[TMP122:%.*]] = bitcast i8** [[TMP121]] to i64* +// CHECK14-NEXT: store i64 [[TMP115]], i64* [[TMP122]], align 8 +// CHECK14-NEXT: [[TMP123:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 0 +// CHECK14-NEXT: store i8* null, i8** [[TMP123]], align 8 +// CHECK14-NEXT: [[TMP124:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 1 +// CHECK14-NEXT: [[TMP125:%.*]] = bitcast i8** [[TMP124]] to i64* +// CHECK14-NEXT: store i64 [[TMP117]], i64* [[TMP125]], align 8 +// CHECK14-NEXT: [[TMP126:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 1 +// CHECK14-NEXT: [[TMP127:%.*]] = bitcast i8** [[TMP126]] to i64* +// CHECK14-NEXT: store i64 [[TMP117]], i64* [[TMP127]], align 8 +// CHECK14-NEXT: [[TMP128:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 1 +// CHECK14-NEXT: store i8* null, i8** 
[[TMP128]], align 8 +// CHECK14-NEXT: [[TMP129:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 2 +// CHECK14-NEXT: [[TMP130:%.*]] = bitcast i8** [[TMP129]] to i64* +// CHECK14-NEXT: store i64 [[TMP1]], i64* [[TMP130]], align 8 +// CHECK14-NEXT: [[TMP131:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 2 +// CHECK14-NEXT: [[TMP132:%.*]] = bitcast i8** [[TMP131]] to i64* +// CHECK14-NEXT: store i64 [[TMP1]], i64* [[TMP132]], align 8 +// CHECK14-NEXT: [[TMP133:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 2 +// CHECK14-NEXT: store i8* null, i8** [[TMP133]], align 8 +// CHECK14-NEXT: [[TMP134:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 3 +// CHECK14-NEXT: [[TMP135:%.*]] = bitcast i8** [[TMP134]] to i32** +// CHECK14-NEXT: store i32* [[VLA]], i32** [[TMP135]], align 8 +// CHECK14-NEXT: [[TMP136:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 3 +// CHECK14-NEXT: [[TMP137:%.*]] = bitcast i8** [[TMP136]] to i32** +// CHECK14-NEXT: store i32* [[VLA]], i32** [[TMP137]], align 8 +// CHECK14-NEXT: store i64 [[TMP118]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.16, i32 0, i32 3), align 8 +// CHECK14-NEXT: [[TMP138:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 3 +// CHECK14-NEXT: store i8* null, i8** [[TMP138]], align 8 +// CHECK14-NEXT: [[TMP139:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 0 +// CHECK14-NEXT: [[TMP140:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 0 +// CHECK14-NEXT: [[TMP141:%.*]] = load i32, i32* [[N]], align 4 +// CHECK14-NEXT: store i32 [[TMP141]], i32* [[DOTCAPTURE_EXPR_54]], align 4 +// CHECK14-NEXT: [[TMP142:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_54]], align 4 +// CHECK14-NEXT: [[SUB56:%.*]] = sub nsw i32 [[TMP142]], 0 +// CHECK14-NEXT: [[DIV57:%.*]] = sdiv i32 [[SUB56]], 1 +// CHECK14-NEXT: [[SUB58:%.*]] = sub nsw i32 [[DIV57]], 1 +// CHECK14-NEXT: store i32 [[SUB58]], i32* [[DOTCAPTURE_EXPR_55]], align 4 +// CHECK14-NEXT: [[TMP143:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_55]], align 4 +// CHECK14-NEXT: [[ADD59:%.*]] = add nsw i32 [[TMP143]], 1 +// CHECK14-NEXT: [[TMP144:%.*]] = zext i32 [[ADD59]] to i64 +// CHECK14-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP144]]) +// CHECK14-NEXT: [[TMP145:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l174.region_id, i32 4, i8** [[TMP139]], i8** [[TMP140]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.16, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK14-NEXT: [[TMP146:%.*]] = icmp ne i32 [[TMP145]], 0 +// CHECK14-NEXT: br i1 [[TMP146]], label [[OMP_OFFLOAD_FAILED60:%.*]], label [[OMP_OFFLOAD_CONT61:%.*]] +// CHECK14: omp_offload.failed60: +// CHECK14-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l174(i64 [[TMP115]], i64 [[TMP117]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] +// CHECK14-NEXT: br label [[OMP_OFFLOAD_CONT61]] +// CHECK14: omp_offload.cont61: +// CHECK14-NEXT: [[TMP147:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 +// CHECK14-NEXT: [[CALL:%.*]] = call noundef signext i32 
@_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP147]]) // CHECK14-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK14-NEXT: [[TMP170:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK14-NEXT: call void @llvm.stackrestore(i8* [[TMP170]]) -// CHECK14-NEXT: [[TMP171:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK14-NEXT: ret i32 [[TMP171]] +// CHECK14-NEXT: [[TMP148:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK14-NEXT: call void @llvm.stackrestore(i8* [[TMP148]]) +// CHECK14-NEXT: [[TMP149:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK14-NEXT: ret i32 [[TMP149]] // // // CHECK14-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l154 @@ -12286,11 +12208,11 @@ // CHECK14-NEXT: [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK14-NEXT: store i32 [[TMP3]], i32* [[CONV2]], align 4 // CHECK14-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i64, i32*, i64)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i32* [[CONV1]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP4]]) +// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i64, i32*, i64)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32* [[CONV1]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP4]]) // CHECK14-NEXT: ret void // // -// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..5 +// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..6 // CHECK14-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK14-NEXT: entry: // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -12372,7 +12294,7 @@ // CHECK14-NEXT: [[CONV7:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK14-NEXT: store i32 [[TMP21]], i32* [[CONV7]], align 4, !llvm.access.group !28 // CHECK14-NEXT: [[TMP22:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !28 -// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64, i32*, i64)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], i64 [[TMP1]], i32* [[TMP2]], i64 [[TMP22]]), !llvm.access.group !28 +// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64, i32*, i64)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], i64 [[TMP1]], i32* [[TMP2]], i64 [[TMP22]]), !llvm.access.group !28 // CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK14: omp.inner.for.inc: // CHECK14-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !28 @@ -12426,7 +12348,7 @@ // CHECK14-NEXT: ret void // // -// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..6 +// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..7 // CHECK14-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK14-NEXT: entry: // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -12558,11 +12480,11 @@ // CHECK14-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32* // CHECK14-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8 // CHECK14-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8 -// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i64, i32*)* @.omp_outlined..8 to void (i32*, i32*, ...)*), i32* [[CONV]], i64 [[TMP0]], i32* [[TMP1]]) +// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i64, i32*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), i32* [[CONV]], i64 [[TMP0]], i32* [[TMP1]]) // CHECK14-NEXT: ret void // // -// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..8 +// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..10 // CHECK14-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] { // CHECK14-NEXT: entry: // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -12634,7 +12556,7 @@ // CHECK14-NEXT: [[TMP17:%.*]] = zext i32 [[TMP16]] to i64 // CHECK14-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !34 // CHECK14-NEXT: [[TMP19:%.*]] = zext i32 [[TMP18]] to i64 -// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64, i32*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), i64 [[TMP17]], i64 [[TMP19]], i32* [[TMP0]], i64 [[TMP1]], i32* [[TMP2]]), !llvm.access.group !34 +// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64, i32*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP17]], i64 [[TMP19]], i32* [[TMP0]], i64 [[TMP1]], i32* [[TMP2]]), !llvm.access.group !34 // CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK14: omp.inner.for.inc: // CHECK14-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !34 @@ -12665,7 +12587,7 @@ // CHECK14-NEXT: ret void // // -// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..9 +// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..11 // CHECK14-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] { // CHECK14-NEXT: entry: // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -12801,11 +12723,11 @@ // CHECK14-NEXT: [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK14-NEXT: store i32 [[TMP3]], i32* [[CONV2]], align 4 // CHECK14-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i64, i32*, i64)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32* [[CONV1]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP4]]) +// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i64, i32*, i64)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i32* [[CONV1]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP4]]) // CHECK14-NEXT: ret void // // -// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..14 // CHECK14-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK14-NEXT: entry: // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -12885,7 +12807,7 @@ // CHECK14-NEXT: [[CONV7:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK14-NEXT: store i32 [[TMP20]], i32* [[CONV7]], align 4, !llvm.access.group !40 // CHECK14-NEXT: [[TMP21:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !40 -// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64, i32*, i64)* @.omp_outlined..12 to void (i32*, i32*, ...)*), i64 [[TMP17]], i64 [[TMP19]], i32* [[TMP0]], i64 [[TMP1]], i32* [[TMP2]], i64 [[TMP21]]), !llvm.access.group !40 +// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64, i32*, i64)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i64 [[TMP17]], i64 [[TMP19]], i32* [[TMP0]], i64 [[TMP1]], i32* [[TMP2]], i64 [[TMP21]]), !llvm.access.group !40 // CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK14: omp.inner.for.inc: // CHECK14-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !40 @@ -12916,7 +12838,7 @@ // CHECK14-NEXT: ret void // // -// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..12 +// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..15 // CHECK14-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK14-NEXT: entry: // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -13074,7 +12996,7 @@ // CHECK14-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK14-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK14-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK14-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l122.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK14-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l122.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.20, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.21, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK14-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK14-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK14: omp_offload.failed: @@ -13092,7 +13014,7 @@ // CHECK14-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0 // CHECK14-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0 // CHECK14-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK14-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l127.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.19, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.20, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK14-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l127.region_id, i32 1, i8** [[TMP14]], i8** 
[[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.24, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.25, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK14-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 // CHECK14-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]] // CHECK14: omp_offload.failed5: @@ -13122,7 +13044,7 @@ // CHECK14-NEXT: [[TMP30:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0 // CHECK14-NEXT: [[TMP31:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0 // CHECK14-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK14-NEXT: [[TMP32:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l132.region_id, i32 2, i8** [[TMP30]], i8** [[TMP31]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.23, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.24, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK14-NEXT: [[TMP32:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l132.region_id, i32 2, i8** [[TMP30]], i8** [[TMP31]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.28, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.29, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK14-NEXT: [[TMP33:%.*]] = icmp ne i32 [[TMP32]], 0 // CHECK14-NEXT: br i1 [[TMP33]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]] // CHECK14: omp_offload.failed11: @@ -13140,7 +13062,7 @@ // CHECK14-NEXT: [[TMP39:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 0 // CHECK14-NEXT: [[TMP40:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 0 // CHECK14-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK14-NEXT: [[TMP41:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l137.region_id, i32 1, i8** [[TMP39]], i8** [[TMP40]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.27, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.28, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK14-NEXT: [[TMP41:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l137.region_id, i32 1, i8** [[TMP39]], i8** [[TMP40]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.32, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.33, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK14-NEXT: [[TMP42:%.*]] = icmp ne i32 [[TMP41]], 0 // CHECK14-NEXT: br i1 [[TMP42]], label [[OMP_OFFLOAD_FAILED17:%.*]], label [[OMP_OFFLOAD_CONT18:%.*]] // CHECK14: omp_offload.failed17: @@ -13170,7 +13092,7 @@ // CHECK14-NEXT: [[TMP55:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0 // CHECK14-NEXT: [[TMP56:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0 // 
CHECK14-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK14-NEXT: [[TMP57:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l142.region_id, i32 2, i8** [[TMP55]], i8** [[TMP56]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.31, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.32, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK14-NEXT: [[TMP57:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l142.region_id, i32 2, i8** [[TMP55]], i8** [[TMP56]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.36, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.37, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK14-NEXT: [[TMP58:%.*]] = icmp ne i32 [[TMP57]], 0 // CHECK14-NEXT: br i1 [[TMP58]], label [[OMP_OFFLOAD_FAILED25:%.*]], label [[OMP_OFFLOAD_CONT26:%.*]] // CHECK14: omp_offload.failed25: @@ -13186,11 +13108,11 @@ // CHECK14-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK14-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 // CHECK14-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 -// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK14-NEXT: ret void // // -// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..14 +// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..18 // CHECK14-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK14-NEXT: entry: // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -13238,7 +13160,7 @@ // CHECK14-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 // CHECK14-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !46 // CHECK14-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]), !llvm.access.group !46 +// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..19 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]), !llvm.access.group !46 // CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK14: omp.inner.for.inc: // CHECK14-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !46 @@ -13260,7 +13182,7 @@ // CHECK14-NEXT: ret void // // -// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..15 +// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..19 // CHECK14-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK14-NEXT: entry: // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -13350,11 +13272,11 @@ // CHECK14-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK14-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 // CHECK14-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 -// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..17 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..22 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK14-NEXT: ret void // // -// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..17 +// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..22 // CHECK14-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK14-NEXT: entry: // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -13402,7 +13324,7 @@ // CHECK14-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 // CHECK14-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !52 // CHECK14-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]), !llvm.access.group !52 +// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..23 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]), !llvm.access.group !52 // CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK14: omp.inner.for.inc: // CHECK14-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !52 @@ -13424,7 +13346,7 @@ // CHECK14-NEXT: ret void // // -// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..18 +// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..23 // CHECK14-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK14-NEXT: entry: // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -13525,11 +13447,11 @@ // CHECK14-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK14-NEXT: store i32 [[TMP2]], i32* [[CONV1]], align 4 // CHECK14-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..21 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP3]]) +// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..26 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP3]]) // CHECK14-NEXT: ret void // // -// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..21 +// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..26 // CHECK14-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK14-NEXT: entry: // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -13585,7 +13507,7 @@ // CHECK14-NEXT: [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK14-NEXT: store i32 [[TMP12]], i32* [[CONV2]], align 4, !llvm.access.group !58 // CHECK14-NEXT: [[TMP13:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !58 -// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..22 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]]), !llvm.access.group !58 +// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..27 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]]), !llvm.access.group !58 // CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK14: omp.inner.for.inc: // CHECK14-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !58 @@ -13607,7 +13529,7 @@ // CHECK14-NEXT: ret void // // -// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..22 +// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..27 // CHECK14-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK14-NEXT: entry: // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -13722,11 +13644,11 @@ // CHECK14-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK14-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 // CHECK14-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 -// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..25 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..30 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK14-NEXT: ret void // // -// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..25 +// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..30 // CHECK14-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK14-NEXT: entry: // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -13774,7 +13696,7 @@ // CHECK14-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 // CHECK14-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !64 // CHECK14-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..26 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]), !llvm.access.group !64 +// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..31 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]), !llvm.access.group !64 // CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK14: omp.inner.for.inc: // CHECK14-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !64 @@ -13796,7 +13718,7 @@ // CHECK14-NEXT: ret void // // -// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..26 +// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..31 // CHECK14-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK14-NEXT: entry: // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -13895,11 +13817,11 @@ // CHECK14-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK14-NEXT: store i32 [[TMP2]], i32* [[CONV1]], align 4 // CHECK14-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..29 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP3]]) +// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..34 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP3]]) // CHECK14-NEXT: ret void // // -// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..29 +// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..34 // CHECK14-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK14-NEXT: entry: // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -13955,7 +13877,7 @@ // CHECK14-NEXT: [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK14-NEXT: store i32 [[TMP12]], i32* [[CONV2]], align 4, !llvm.access.group !70 // CHECK14-NEXT: [[TMP13:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !70 -// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..30 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]]), !llvm.access.group !70 +// CHECK14-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..35 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]]), !llvm.access.group !70 // CHECK14-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK14: omp.inner.for.inc: // CHECK14-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !70 @@ -13977,7 +13899,7 @@ // CHECK14-NEXT: ret void // // -// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..30 +// CHECK14-LABEL: define {{[^@]+}}@.omp_outlined..35 // CHECK14-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK14-NEXT: entry: // CHECK14-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -14084,7 +14006,6 @@ // CHECK15-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK15-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK15-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8 -// CHECK15-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 8 // CHECK15-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK15-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK15-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -14092,36 +14013,32 @@ // CHECK15-NEXT: [[DOTOFFLOAD_BASEPTRS5:%.*]] = alloca [3 x i8*], align 8 // CHECK15-NEXT: [[DOTOFFLOAD_PTRS6:%.*]] = alloca [3 x i8*], align 8 // CHECK15-NEXT: [[DOTOFFLOAD_MAPPERS7:%.*]] = alloca [3 x i8*], align 8 -// CHECK15-NEXT: [[DOTOFFLOAD_SIZES8:%.*]] = alloca [3 x i64], align 8 -// CHECK15-NEXT: [[_TMP9:%.*]] = alloca i32, align 4 +// CHECK15-NEXT: [[_TMP8:%.*]] = alloca i32, align 4 +// CHECK15-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4 // CHECK15-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4 -// CHECK15-NEXT: [[DOTCAPTURE_EXPR_11:%.*]] = alloca i32, align 4 // CHECK15-NEXT: [[M_CASTED:%.*]] = alloca i64, align 8 -// CHECK15-NEXT: [[N_CASTED19:%.*]] = alloca i64, align 8 -// CHECK15-NEXT: [[DOTOFFLOAD_BASEPTRS21:%.*]] = alloca [4 x i8*], align 8 -// CHECK15-NEXT: [[DOTOFFLOAD_PTRS22:%.*]] = alloca [4 x i8*], align 8 -// CHECK15-NEXT: [[DOTOFFLOAD_MAPPERS23:%.*]] = alloca [4 x i8*], align 8 -// CHECK15-NEXT: [[DOTOFFLOAD_SIZES24:%.*]] = alloca [4 x i64], align 8 -// CHECK15-NEXT: [[_TMP25:%.*]] = alloca i32, align 4 -// CHECK15-NEXT: [[DOTCAPTURE_EXPR_26:%.*]] = alloca i32, align 4 -// CHECK15-NEXT: [[DOTCAPTURE_EXPR_27:%.*]] = alloca i32, align 4 -// CHECK15-NEXT: [[N_CASTED34:%.*]] = alloca i64, align 8 -// CHECK15-NEXT: [[DOTOFFLOAD_BASEPTRS36:%.*]] = alloca [3 x i8*], align 8 -// CHECK15-NEXT: [[DOTOFFLOAD_PTRS37:%.*]] = alloca [3 x i8*], align 8 -// CHECK15-NEXT: [[DOTOFFLOAD_MAPPERS38:%.*]] = alloca [3 x i8*], align 8 -// CHECK15-NEXT: [[DOTOFFLOAD_SIZES39:%.*]] = alloca [3 x i64], align 8 -// CHECK15-NEXT: [[_TMP40:%.*]] = alloca i32, align 4 -// CHECK15-NEXT: [[DOTCAPTURE_EXPR_41:%.*]] = alloca i32, align 4 -// CHECK15-NEXT: [[DOTCAPTURE_EXPR_42:%.*]] = alloca i32, align 4 -// CHECK15-NEXT: [[M_CASTED49:%.*]] = alloca i64, align 8 -// CHECK15-NEXT: [[N_CASTED51:%.*]] = alloca i64, align 8 -// CHECK15-NEXT: [[DOTOFFLOAD_BASEPTRS53:%.*]] = alloca [4 x i8*], align 8 -// CHECK15-NEXT: [[DOTOFFLOAD_PTRS54:%.*]] = alloca [4 x i8*], align 8 -// CHECK15-NEXT: 
[[DOTOFFLOAD_MAPPERS55:%.*]] = alloca [4 x i8*], align 8 -// CHECK15-NEXT: [[DOTOFFLOAD_SIZES56:%.*]] = alloca [4 x i64], align 8 -// CHECK15-NEXT: [[_TMP57:%.*]] = alloca i32, align 4 -// CHECK15-NEXT: [[DOTCAPTURE_EXPR_58:%.*]] = alloca i32, align 4 -// CHECK15-NEXT: [[DOTCAPTURE_EXPR_59:%.*]] = alloca i32, align 4 +// CHECK15-NEXT: [[N_CASTED18:%.*]] = alloca i64, align 8 +// CHECK15-NEXT: [[DOTOFFLOAD_BASEPTRS20:%.*]] = alloca [4 x i8*], align 8 +// CHECK15-NEXT: [[DOTOFFLOAD_PTRS21:%.*]] = alloca [4 x i8*], align 8 +// CHECK15-NEXT: [[DOTOFFLOAD_MAPPERS22:%.*]] = alloca [4 x i8*], align 8 +// CHECK15-NEXT: [[_TMP23:%.*]] = alloca i32, align 4 +// CHECK15-NEXT: [[DOTCAPTURE_EXPR_24:%.*]] = alloca i32, align 4 +// CHECK15-NEXT: [[DOTCAPTURE_EXPR_25:%.*]] = alloca i32, align 4 +// CHECK15-NEXT: [[N_CASTED32:%.*]] = alloca i64, align 8 +// CHECK15-NEXT: [[DOTOFFLOAD_BASEPTRS34:%.*]] = alloca [3 x i8*], align 8 +// CHECK15-NEXT: [[DOTOFFLOAD_PTRS35:%.*]] = alloca [3 x i8*], align 8 +// CHECK15-NEXT: [[DOTOFFLOAD_MAPPERS36:%.*]] = alloca [3 x i8*], align 8 +// CHECK15-NEXT: [[_TMP37:%.*]] = alloca i32, align 4 +// CHECK15-NEXT: [[DOTCAPTURE_EXPR_38:%.*]] = alloca i32, align 4 +// CHECK15-NEXT: [[DOTCAPTURE_EXPR_39:%.*]] = alloca i32, align 4 +// CHECK15-NEXT: [[M_CASTED46:%.*]] = alloca i64, align 8 +// CHECK15-NEXT: [[N_CASTED48:%.*]] = alloca i64, align 8 +// CHECK15-NEXT: [[DOTOFFLOAD_BASEPTRS50:%.*]] = alloca [4 x i8*], align 8 +// CHECK15-NEXT: [[DOTOFFLOAD_PTRS51:%.*]] = alloca [4 x i8*], align 8 +// CHECK15-NEXT: [[DOTOFFLOAD_MAPPERS52:%.*]] = alloca [4 x i8*], align 8 +// CHECK15-NEXT: [[_TMP53:%.*]] = alloca i32, align 4 +// CHECK15-NEXT: [[DOTCAPTURE_EXPR_54:%.*]] = alloca i32, align 4 +// CHECK15-NEXT: [[DOTCAPTURE_EXPR_55:%.*]] = alloca i32, align 4 // CHECK15-NEXT: store i32 0, i32* [[RETVAL]], align 4 // CHECK15-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 // CHECK15-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 8 @@ -14144,310 +14061,276 @@ // CHECK15-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK15-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i64* // CHECK15-NEXT: store i64 [[TMP4]], i64* [[TMP9]], align 8 -// CHECK15-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK15-NEXT: store i64 4, i64* [[TMP10]], align 8 -// CHECK15-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK15-NEXT: store i8* null, i8** [[TMP11]], align 8 -// CHECK15-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK15-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64* -// CHECK15-NEXT: store i64 [[TMP1]], i64* [[TMP13]], align 8 -// CHECK15-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK15-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64* -// CHECK15-NEXT: store i64 [[TMP1]], i64* [[TMP15]], align 8 -// CHECK15-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK15-NEXT: store i64 8, i64* [[TMP16]], align 8 -// CHECK15-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK15-NEXT: store i8* null, i8** [[TMP17]], align 8 -// CHECK15-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK15-NEXT: 
[[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK15-NEXT: store i8* null, i8** [[TMP10]], align 8 +// CHECK15-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK15-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i64* +// CHECK15-NEXT: store i64 [[TMP1]], i64* [[TMP12]], align 8 +// CHECK15-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK15-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i64* +// CHECK15-NEXT: store i64 [[TMP1]], i64* [[TMP14]], align 8 +// CHECK15-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK15-NEXT: store i8* null, i8** [[TMP15]], align 8 +// CHECK15-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK15-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** +// CHECK15-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 8 +// CHECK15-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK15-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** // CHECK15-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 8 -// CHECK15-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK15-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** -// CHECK15-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 8 -// CHECK15-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK15-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 8 -// CHECK15-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK15-NEXT: store i8* null, i8** [[TMP23]], align 8 -// CHECK15-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4 -// CHECK15-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK15-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK15-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 +// CHECK15-NEXT: store i64 [[TMP5]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 2), align 8 +// CHECK15-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK15-NEXT: store i8* null, i8** [[TMP20]], align 8 +// CHECK15-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK15-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK15-NEXT: [[TMP23:%.*]] = load i32, i32* [[N]], align 4 +// CHECK15-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK15-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK15-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK15-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK15-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK15-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK15-NEXT: [[TMP29:%.*]] = load i32, i32* 
[[DOTCAPTURE_EXPR_1]], align 4 -// CHECK15-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1 -// CHECK15-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 -// CHECK15-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP30]]) -// CHECK15-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l154.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK15-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 -// CHECK15-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK15-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK15-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], 1 +// CHECK15-NEXT: [[TMP26:%.*]] = zext i32 [[ADD]] to i64 +// CHECK15-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP26]]) +// CHECK15-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l154.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK15-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK15-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK15: omp_offload.failed: // CHECK15-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l154(i64 [[TMP4]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK15-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK15: omp_offload.cont: -// CHECK15-NEXT: [[TMP33:%.*]] = load i32, i32* [[N]], align 4 +// CHECK15-NEXT: [[TMP29:%.*]] = load i32, i32* [[N]], align 4 // CHECK15-NEXT: [[CONV4:%.*]] = bitcast i64* [[N_CASTED3]] to i32* -// CHECK15-NEXT: store i32 [[TMP33]], i32* [[CONV4]], align 4 -// CHECK15-NEXT: [[TMP34:%.*]] = load i64, i64* [[N_CASTED3]], align 8 -// CHECK15-NEXT: [[TMP35:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK15-NEXT: [[TMP36:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i64* -// CHECK15-NEXT: store i64 [[TMP34]], i64* [[TMP37]], align 8 -// CHECK15-NEXT: [[TMP38:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i64* -// CHECK15-NEXT: store i64 [[TMP34]], i64* [[TMP39]], align 8 -// CHECK15-NEXT: [[TMP40:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 0 -// CHECK15-NEXT: store i64 4, i64* [[TMP40]], align 8 -// CHECK15-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 0 +// CHECK15-NEXT: store i32 [[TMP29]], i32* [[CONV4]], align 4 +// CHECK15-NEXT: [[TMP30:%.*]] = load i64, i64* [[N_CASTED3]], align 8 +// CHECK15-NEXT: [[TMP31:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK15-NEXT: [[TMP32:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 +// CHECK15-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i64* +// CHECK15-NEXT: store i64 [[TMP30]], i64* [[TMP33]], align 8 +// CHECK15-NEXT: 
[[TMP34:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 +// CHECK15-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i64* +// CHECK15-NEXT: store i64 [[TMP30]], i64* [[TMP35]], align 8 +// CHECK15-NEXT: [[TMP36:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 0 +// CHECK15-NEXT: store i8* null, i8** [[TMP36]], align 8 +// CHECK15-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1 +// CHECK15-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i64* +// CHECK15-NEXT: store i64 [[TMP1]], i64* [[TMP38]], align 8 +// CHECK15-NEXT: [[TMP39:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 1 +// CHECK15-NEXT: [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i64* +// CHECK15-NEXT: store i64 [[TMP1]], i64* [[TMP40]], align 8 +// CHECK15-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 1 // CHECK15-NEXT: store i8* null, i8** [[TMP41]], align 8 -// CHECK15-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1 -// CHECK15-NEXT: [[TMP43:%.*]] = bitcast i8** [[TMP42]] to i64* -// CHECK15-NEXT: store i64 [[TMP1]], i64* [[TMP43]], align 8 -// CHECK15-NEXT: [[TMP44:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 1 -// CHECK15-NEXT: [[TMP45:%.*]] = bitcast i8** [[TMP44]] to i64* -// CHECK15-NEXT: store i64 [[TMP1]], i64* [[TMP45]], align 8 -// CHECK15-NEXT: [[TMP46:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 1 -// CHECK15-NEXT: store i64 8, i64* [[TMP46]], align 8 -// CHECK15-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 1 -// CHECK15-NEXT: store i8* null, i8** [[TMP47]], align 8 -// CHECK15-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 2 -// CHECK15-NEXT: [[TMP49:%.*]] = bitcast i8** [[TMP48]] to i32** -// CHECK15-NEXT: store i32* [[VLA]], i32** [[TMP49]], align 8 -// CHECK15-NEXT: [[TMP50:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 2 -// CHECK15-NEXT: [[TMP51:%.*]] = bitcast i8** [[TMP50]] to i32** -// CHECK15-NEXT: store i32* [[VLA]], i32** [[TMP51]], align 8 -// CHECK15-NEXT: [[TMP52:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 2 -// CHECK15-NEXT: store i64 [[TMP35]], i64* [[TMP52]], align 8 -// CHECK15-NEXT: [[TMP53:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 2 -// CHECK15-NEXT: store i8* null, i8** [[TMP53]], align 8 -// CHECK15-NEXT: [[TMP54:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP55:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP56:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 0 +// CHECK15-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 2 +// CHECK15-NEXT: [[TMP43:%.*]] = bitcast i8** [[TMP42]] to i32** +// CHECK15-NEXT: store i32* [[VLA]], i32** [[TMP43]], align 8 +// CHECK15-NEXT: [[TMP44:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 2 +// CHECK15-NEXT: [[TMP45:%.*]] = bitcast i8** [[TMP44]] to i32** +// CHECK15-NEXT: store i32* [[VLA]], i32** 
[[TMP45]], align 8 +// CHECK15-NEXT: store i64 [[TMP31]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 2), align 8 +// CHECK15-NEXT: [[TMP46:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 2 +// CHECK15-NEXT: store i8* null, i8** [[TMP46]], align 8 +// CHECK15-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 +// CHECK15-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 +// CHECK15-NEXT: [[TMP49:%.*]] = load i32, i32* [[N]], align 4 +// CHECK15-NEXT: store i32 [[TMP49]], i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK15-NEXT: [[TMP50:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK15-NEXT: [[SUB11:%.*]] = sub nsw i32 [[TMP50]], 0 +// CHECK15-NEXT: [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1 +// CHECK15-NEXT: [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1 +// CHECK15-NEXT: store i32 [[SUB13]], i32* [[DOTCAPTURE_EXPR_10]], align 4 +// CHECK15-NEXT: [[TMP51:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 +// CHECK15-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP51]], 1 +// CHECK15-NEXT: [[TMP52:%.*]] = zext i32 [[ADD14]] to i64 +// CHECK15-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP52]]) +// CHECK15-NEXT: [[TMP53:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159.region_id, i32 3, i8** [[TMP47]], i8** [[TMP48]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK15-NEXT: [[TMP54:%.*]] = icmp ne i32 [[TMP53]], 0 +// CHECK15-NEXT: br i1 [[TMP54]], label [[OMP_OFFLOAD_FAILED15:%.*]], label [[OMP_OFFLOAD_CONT16:%.*]] +// CHECK15: omp_offload.failed15: +// CHECK15-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159(i64 [[TMP30]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] +// CHECK15-NEXT: br label [[OMP_OFFLOAD_CONT16]] +// CHECK15: omp_offload.cont16: +// CHECK15-NEXT: [[TMP55:%.*]] = load i32, i32* [[M]], align 4 +// CHECK15-NEXT: [[CONV17:%.*]] = bitcast i64* [[M_CASTED]] to i32* +// CHECK15-NEXT: store i32 [[TMP55]], i32* [[CONV17]], align 4 +// CHECK15-NEXT: [[TMP56:%.*]] = load i64, i64* [[M_CASTED]], align 8 // CHECK15-NEXT: [[TMP57:%.*]] = load i32, i32* [[N]], align 4 -// CHECK15-NEXT: store i32 [[TMP57]], i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK15-NEXT: [[TMP58:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK15-NEXT: [[SUB12:%.*]] = sub nsw i32 [[TMP58]], 0 -// CHECK15-NEXT: [[DIV13:%.*]] = sdiv i32 [[SUB12]], 1 -// CHECK15-NEXT: [[SUB14:%.*]] = sub nsw i32 [[DIV13]], 1 -// CHECK15-NEXT: store i32 [[SUB14]], i32* [[DOTCAPTURE_EXPR_11]], align 4 -// CHECK15-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_11]], align 4 -// CHECK15-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP59]], 1 -// CHECK15-NEXT: [[TMP60:%.*]] = zext i32 [[ADD15]] to i64 -// CHECK15-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP60]]) -// CHECK15-NEXT: [[TMP61:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159.region_id, i32 3, i8** [[TMP54]], i8** [[TMP55]], i64* [[TMP56]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), 
i8** null, i8** null, i32 0, i32 0) -// CHECK15-NEXT: [[TMP62:%.*]] = icmp ne i32 [[TMP61]], 0 -// CHECK15-NEXT: br i1 [[TMP62]], label [[OMP_OFFLOAD_FAILED16:%.*]], label [[OMP_OFFLOAD_CONT17:%.*]] -// CHECK15: omp_offload.failed16: -// CHECK15-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159(i64 [[TMP34]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] -// CHECK15-NEXT: br label [[OMP_OFFLOAD_CONT17]] -// CHECK15: omp_offload.cont17: -// CHECK15-NEXT: [[TMP63:%.*]] = load i32, i32* [[M]], align 4 -// CHECK15-NEXT: [[CONV18:%.*]] = bitcast i64* [[M_CASTED]] to i32* -// CHECK15-NEXT: store i32 [[TMP63]], i32* [[CONV18]], align 4 -// CHECK15-NEXT: [[TMP64:%.*]] = load i64, i64* [[M_CASTED]], align 8 -// CHECK15-NEXT: [[TMP65:%.*]] = load i32, i32* [[N]], align 4 -// CHECK15-NEXT: [[CONV20:%.*]] = bitcast i64* [[N_CASTED19]] to i32* -// CHECK15-NEXT: store i32 [[TMP65]], i32* [[CONV20]], align 4 -// CHECK15-NEXT: [[TMP66:%.*]] = load i64, i64* [[N_CASTED19]], align 8 -// CHECK15-NEXT: [[TMP67:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK15-NEXT: [[TMP68:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP69:%.*]] = bitcast i8** [[TMP68]] to i64* -// CHECK15-NEXT: store i64 [[TMP64]], i64* [[TMP69]], align 8 -// CHECK15-NEXT: [[TMP70:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0 +// CHECK15-NEXT: [[CONV19:%.*]] = bitcast i64* [[N_CASTED18]] to i32* +// CHECK15-NEXT: store i32 [[TMP57]], i32* [[CONV19]], align 4 +// CHECK15-NEXT: [[TMP58:%.*]] = load i64, i64* [[N_CASTED18]], align 8 +// CHECK15-NEXT: [[TMP59:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK15-NEXT: [[TMP60:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0 +// CHECK15-NEXT: [[TMP61:%.*]] = bitcast i8** [[TMP60]] to i64* +// CHECK15-NEXT: store i64 [[TMP56]], i64* [[TMP61]], align 8 +// CHECK15-NEXT: [[TMP62:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0 +// CHECK15-NEXT: [[TMP63:%.*]] = bitcast i8** [[TMP62]] to i64* +// CHECK15-NEXT: store i64 [[TMP56]], i64* [[TMP63]], align 8 +// CHECK15-NEXT: [[TMP64:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 0 +// CHECK15-NEXT: store i8* null, i8** [[TMP64]], align 8 +// CHECK15-NEXT: [[TMP65:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 1 +// CHECK15-NEXT: [[TMP66:%.*]] = bitcast i8** [[TMP65]] to i64* +// CHECK15-NEXT: store i64 [[TMP58]], i64* [[TMP66]], align 8 +// CHECK15-NEXT: [[TMP67:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 1 +// CHECK15-NEXT: [[TMP68:%.*]] = bitcast i8** [[TMP67]] to i64* +// CHECK15-NEXT: store i64 [[TMP58]], i64* [[TMP68]], align 8 +// CHECK15-NEXT: [[TMP69:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 1 +// CHECK15-NEXT: store i8* null, i8** [[TMP69]], align 8 +// CHECK15-NEXT: [[TMP70:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 2 // CHECK15-NEXT: [[TMP71:%.*]] = bitcast i8** [[TMP70]] to i64* -// CHECK15-NEXT: store i64 [[TMP64]], i64* [[TMP71]], align 8 -// CHECK15-NEXT: [[TMP72:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES24]], i32 0, i32 0 -// CHECK15-NEXT: store i64 4, i64* [[TMP72]], align 8 -// CHECK15-NEXT: [[TMP73:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 0 -// 
CHECK15-NEXT: store i8* null, i8** [[TMP73]], align 8 -// CHECK15-NEXT: [[TMP74:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 1 -// CHECK15-NEXT: [[TMP75:%.*]] = bitcast i8** [[TMP74]] to i64* -// CHECK15-NEXT: store i64 [[TMP66]], i64* [[TMP75]], align 8 -// CHECK15-NEXT: [[TMP76:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 1 -// CHECK15-NEXT: [[TMP77:%.*]] = bitcast i8** [[TMP76]] to i64* -// CHECK15-NEXT: store i64 [[TMP66]], i64* [[TMP77]], align 8 -// CHECK15-NEXT: [[TMP78:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES24]], i32 0, i32 1 -// CHECK15-NEXT: store i64 4, i64* [[TMP78]], align 8 -// CHECK15-NEXT: [[TMP79:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 1 +// CHECK15-NEXT: store i64 [[TMP1]], i64* [[TMP71]], align 8 +// CHECK15-NEXT: [[TMP72:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 2 +// CHECK15-NEXT: [[TMP73:%.*]] = bitcast i8** [[TMP72]] to i64* +// CHECK15-NEXT: store i64 [[TMP1]], i64* [[TMP73]], align 8 +// CHECK15-NEXT: [[TMP74:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 2 +// CHECK15-NEXT: store i8* null, i8** [[TMP74]], align 8 +// CHECK15-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 3 +// CHECK15-NEXT: [[TMP76:%.*]] = bitcast i8** [[TMP75]] to i32** +// CHECK15-NEXT: store i32* [[VLA]], i32** [[TMP76]], align 8 +// CHECK15-NEXT: [[TMP77:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 3 +// CHECK15-NEXT: [[TMP78:%.*]] = bitcast i8** [[TMP77]] to i32** +// CHECK15-NEXT: store i32* [[VLA]], i32** [[TMP78]], align 8 +// CHECK15-NEXT: store i64 [[TMP59]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 3), align 8 +// CHECK15-NEXT: [[TMP79:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 3 // CHECK15-NEXT: store i8* null, i8** [[TMP79]], align 8 -// CHECK15-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 2 -// CHECK15-NEXT: [[TMP81:%.*]] = bitcast i8** [[TMP80]] to i64* -// CHECK15-NEXT: store i64 [[TMP1]], i64* [[TMP81]], align 8 -// CHECK15-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 2 -// CHECK15-NEXT: [[TMP83:%.*]] = bitcast i8** [[TMP82]] to i64* -// CHECK15-NEXT: store i64 [[TMP1]], i64* [[TMP83]], align 8 -// CHECK15-NEXT: [[TMP84:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES24]], i32 0, i32 2 -// CHECK15-NEXT: store i64 8, i64* [[TMP84]], align 8 -// CHECK15-NEXT: [[TMP85:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 2 -// CHECK15-NEXT: store i8* null, i8** [[TMP85]], align 8 -// CHECK15-NEXT: [[TMP86:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 3 -// CHECK15-NEXT: [[TMP87:%.*]] = bitcast i8** [[TMP86]] to i32** -// CHECK15-NEXT: store i32* [[VLA]], i32** [[TMP87]], align 8 -// CHECK15-NEXT: [[TMP88:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 3 -// CHECK15-NEXT: [[TMP89:%.*]] = bitcast i8** [[TMP88]] to i32** -// CHECK15-NEXT: store i32* [[VLA]], i32** [[TMP89]], align 8 -// CHECK15-NEXT: [[TMP90:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES24]], i32 0, i32 3 
-// CHECK15-NEXT: store i64 [[TMP67]], i64* [[TMP90]], align 8 -// CHECK15-NEXT: [[TMP91:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 3 -// CHECK15-NEXT: store i8* null, i8** [[TMP91]], align 8 -// CHECK15-NEXT: [[TMP92:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP93:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP94:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES24]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP95:%.*]] = load i32, i32* [[N]], align 4 -// CHECK15-NEXT: store i32 [[TMP95]], i32* [[DOTCAPTURE_EXPR_26]], align 4 -// CHECK15-NEXT: [[TMP96:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_26]], align 4 -// CHECK15-NEXT: [[SUB28:%.*]] = sub nsw i32 [[TMP96]], 0 -// CHECK15-NEXT: [[DIV29:%.*]] = sdiv i32 [[SUB28]], 1 -// CHECK15-NEXT: [[SUB30:%.*]] = sub nsw i32 [[DIV29]], 1 -// CHECK15-NEXT: store i32 [[SUB30]], i32* [[DOTCAPTURE_EXPR_27]], align 4 -// CHECK15-NEXT: [[TMP97:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_27]], align 4 -// CHECK15-NEXT: [[ADD31:%.*]] = add nsw i32 [[TMP97]], 1 -// CHECK15-NEXT: [[TMP98:%.*]] = zext i32 [[ADD31]] to i64 -// CHECK15-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP98]]) -// CHECK15-NEXT: [[TMP99:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l164.region_id, i32 4, i8** [[TMP92]], i8** [[TMP93]], i64* [[TMP94]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.7, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK15-NEXT: [[TMP100:%.*]] = icmp ne i32 [[TMP99]], 0 -// CHECK15-NEXT: br i1 [[TMP100]], label [[OMP_OFFLOAD_FAILED32:%.*]], label [[OMP_OFFLOAD_CONT33:%.*]] -// CHECK15: omp_offload.failed32: -// CHECK15-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l164(i64 [[TMP64]], i64 [[TMP66]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] -// CHECK15-NEXT: br label [[OMP_OFFLOAD_CONT33]] -// CHECK15: omp_offload.cont33: -// CHECK15-NEXT: [[TMP101:%.*]] = load i32, i32* [[N]], align 4 -// CHECK15-NEXT: [[CONV35:%.*]] = bitcast i64* [[N_CASTED34]] to i32* -// CHECK15-NEXT: store i32 [[TMP101]], i32* [[CONV35]], align 4 -// CHECK15-NEXT: [[TMP102:%.*]] = load i64, i64* [[N_CASTED34]], align 8 -// CHECK15-NEXT: [[TMP103:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK15-NEXT: [[TMP104:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS36]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP105:%.*]] = bitcast i8** [[TMP104]] to i64* -// CHECK15-NEXT: store i64 [[TMP102]], i64* [[TMP105]], align 8 -// CHECK15-NEXT: [[TMP106:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS37]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP107:%.*]] = bitcast i8** [[TMP106]] to i64* -// CHECK15-NEXT: store i64 [[TMP102]], i64* [[TMP107]], align 8 -// CHECK15-NEXT: [[TMP108:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES39]], i32 0, i32 0 -// CHECK15-NEXT: store i64 4, i64* [[TMP108]], align 8 -// CHECK15-NEXT: [[TMP109:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS38]], i64 0, i64 0 -// CHECK15-NEXT: store i8* null, i8** [[TMP109]], align 8 -// CHECK15-NEXT: [[TMP110:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS36]], i32 0, i32 1 -// CHECK15-NEXT: [[TMP111:%.*]] = bitcast i8** [[TMP110]] to i64* -// CHECK15-NEXT: 
store i64 [[TMP1]], i64* [[TMP111]], align 8 -// CHECK15-NEXT: [[TMP112:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS37]], i32 0, i32 1 -// CHECK15-NEXT: [[TMP113:%.*]] = bitcast i8** [[TMP112]] to i64* -// CHECK15-NEXT: store i64 [[TMP1]], i64* [[TMP113]], align 8 -// CHECK15-NEXT: [[TMP114:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES39]], i32 0, i32 1 -// CHECK15-NEXT: store i64 8, i64* [[TMP114]], align 8 -// CHECK15-NEXT: [[TMP115:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS38]], i64 0, i64 1 -// CHECK15-NEXT: store i8* null, i8** [[TMP115]], align 8 -// CHECK15-NEXT: [[TMP116:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS36]], i32 0, i32 2 -// CHECK15-NEXT: [[TMP117:%.*]] = bitcast i8** [[TMP116]] to i32** -// CHECK15-NEXT: store i32* [[VLA]], i32** [[TMP117]], align 8 -// CHECK15-NEXT: [[TMP118:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS37]], i32 0, i32 2 -// CHECK15-NEXT: [[TMP119:%.*]] = bitcast i8** [[TMP118]] to i32** -// CHECK15-NEXT: store i32* [[VLA]], i32** [[TMP119]], align 8 -// CHECK15-NEXT: [[TMP120:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES39]], i32 0, i32 2 -// CHECK15-NEXT: store i64 [[TMP103]], i64* [[TMP120]], align 8 -// CHECK15-NEXT: [[TMP121:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS38]], i64 0, i64 2 -// CHECK15-NEXT: store i8* null, i8** [[TMP121]], align 8 -// CHECK15-NEXT: [[TMP122:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS36]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP123:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS37]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP124:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES39]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP125:%.*]] = load i32, i32* [[N]], align 4 -// CHECK15-NEXT: store i32 [[TMP125]], i32* [[DOTCAPTURE_EXPR_41]], align 4 -// CHECK15-NEXT: [[TMP126:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_41]], align 4 -// CHECK15-NEXT: [[SUB43:%.*]] = sub nsw i32 [[TMP126]], 0 -// CHECK15-NEXT: [[DIV44:%.*]] = sdiv i32 [[SUB43]], 1 -// CHECK15-NEXT: [[SUB45:%.*]] = sub nsw i32 [[DIV44]], 1 -// CHECK15-NEXT: store i32 [[SUB45]], i32* [[DOTCAPTURE_EXPR_42]], align 4 -// CHECK15-NEXT: [[TMP127:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_42]], align 4 -// CHECK15-NEXT: [[ADD46:%.*]] = add nsw i32 [[TMP127]], 1 -// CHECK15-NEXT: [[TMP128:%.*]] = zext i32 [[ADD46]] to i64 -// CHECK15-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP128]]) -// CHECK15-NEXT: [[TMP129:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l169.region_id, i32 3, i8** [[TMP122]], i8** [[TMP123]], i64* [[TMP124]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK15-NEXT: [[TMP130:%.*]] = icmp ne i32 [[TMP129]], 0 -// CHECK15-NEXT: br i1 [[TMP130]], label [[OMP_OFFLOAD_FAILED47:%.*]], label [[OMP_OFFLOAD_CONT48:%.*]] -// CHECK15: omp_offload.failed47: -// CHECK15-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l169(i64 [[TMP102]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] -// CHECK15-NEXT: br label [[OMP_OFFLOAD_CONT48]] -// CHECK15: omp_offload.cont48: -// CHECK15-NEXT: [[TMP131:%.*]] = load i32, i32* [[M]], align 4 -// CHECK15-NEXT: [[CONV50:%.*]] = bitcast i64* 
[[M_CASTED49]] to i32* -// CHECK15-NEXT: store i32 [[TMP131]], i32* [[CONV50]], align 4 -// CHECK15-NEXT: [[TMP132:%.*]] = load i64, i64* [[M_CASTED49]], align 8 -// CHECK15-NEXT: [[TMP133:%.*]] = load i32, i32* [[N]], align 4 -// CHECK15-NEXT: [[CONV52:%.*]] = bitcast i64* [[N_CASTED51]] to i32* -// CHECK15-NEXT: store i32 [[TMP133]], i32* [[CONV52]], align 4 -// CHECK15-NEXT: [[TMP134:%.*]] = load i64, i64* [[N_CASTED51]], align 8 -// CHECK15-NEXT: [[TMP135:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK15-NEXT: [[TMP136:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS53]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP137:%.*]] = bitcast i8** [[TMP136]] to i64* -// CHECK15-NEXT: store i64 [[TMP132]], i64* [[TMP137]], align 8 -// CHECK15-NEXT: [[TMP138:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS54]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP139:%.*]] = bitcast i8** [[TMP138]] to i64* -// CHECK15-NEXT: store i64 [[TMP132]], i64* [[TMP139]], align 8 -// CHECK15-NEXT: [[TMP140:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES56]], i32 0, i32 0 -// CHECK15-NEXT: store i64 4, i64* [[TMP140]], align 8 -// CHECK15-NEXT: [[TMP141:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS55]], i64 0, i64 0 -// CHECK15-NEXT: store i8* null, i8** [[TMP141]], align 8 -// CHECK15-NEXT: [[TMP142:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS53]], i32 0, i32 1 -// CHECK15-NEXT: [[TMP143:%.*]] = bitcast i8** [[TMP142]] to i64* -// CHECK15-NEXT: store i64 [[TMP134]], i64* [[TMP143]], align 8 -// CHECK15-NEXT: [[TMP144:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS54]], i32 0, i32 1 -// CHECK15-NEXT: [[TMP145:%.*]] = bitcast i8** [[TMP144]] to i64* -// CHECK15-NEXT: store i64 [[TMP134]], i64* [[TMP145]], align 8 -// CHECK15-NEXT: [[TMP146:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES56]], i32 0, i32 1 -// CHECK15-NEXT: store i64 4, i64* [[TMP146]], align 8 -// CHECK15-NEXT: [[TMP147:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS55]], i64 0, i64 1 -// CHECK15-NEXT: store i8* null, i8** [[TMP147]], align 8 -// CHECK15-NEXT: [[TMP148:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS53]], i32 0, i32 2 -// CHECK15-NEXT: [[TMP149:%.*]] = bitcast i8** [[TMP148]] to i64* -// CHECK15-NEXT: store i64 [[TMP1]], i64* [[TMP149]], align 8 -// CHECK15-NEXT: [[TMP150:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS54]], i32 0, i32 2 -// CHECK15-NEXT: [[TMP151:%.*]] = bitcast i8** [[TMP150]] to i64* -// CHECK15-NEXT: store i64 [[TMP1]], i64* [[TMP151]], align 8 -// CHECK15-NEXT: [[TMP152:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES56]], i32 0, i32 2 -// CHECK15-NEXT: store i64 8, i64* [[TMP152]], align 8 -// CHECK15-NEXT: [[TMP153:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS55]], i64 0, i64 2 -// CHECK15-NEXT: store i8* null, i8** [[TMP153]], align 8 -// CHECK15-NEXT: [[TMP154:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS53]], i32 0, i32 3 -// CHECK15-NEXT: [[TMP155:%.*]] = bitcast i8** [[TMP154]] to i32** -// CHECK15-NEXT: store i32* [[VLA]], i32** [[TMP155]], align 8 -// CHECK15-NEXT: [[TMP156:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS54]], i32 0, i32 3 -// CHECK15-NEXT: [[TMP157:%.*]] = bitcast i8** [[TMP156]] to i32** -// CHECK15-NEXT: store i32* [[VLA]], i32** [[TMP157]], align 8 -// 
CHECK15-NEXT: [[TMP158:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES56]], i32 0, i32 3 -// CHECK15-NEXT: store i64 [[TMP135]], i64* [[TMP158]], align 8 -// CHECK15-NEXT: [[TMP159:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS55]], i64 0, i64 3 -// CHECK15-NEXT: store i8* null, i8** [[TMP159]], align 8 -// CHECK15-NEXT: [[TMP160:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS53]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP161:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS54]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP162:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES56]], i32 0, i32 0 -// CHECK15-NEXT: [[TMP163:%.*]] = load i32, i32* [[N]], align 4 -// CHECK15-NEXT: store i32 [[TMP163]], i32* [[DOTCAPTURE_EXPR_58]], align 4 -// CHECK15-NEXT: [[TMP164:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_58]], align 4 -// CHECK15-NEXT: [[SUB60:%.*]] = sub nsw i32 [[TMP164]], 0 -// CHECK15-NEXT: [[DIV61:%.*]] = sdiv i32 [[SUB60]], 1 -// CHECK15-NEXT: [[SUB62:%.*]] = sub nsw i32 [[DIV61]], 1 -// CHECK15-NEXT: store i32 [[SUB62]], i32* [[DOTCAPTURE_EXPR_59]], align 4 -// CHECK15-NEXT: [[TMP165:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_59]], align 4 -// CHECK15-NEXT: [[ADD63:%.*]] = add nsw i32 [[TMP165]], 1 -// CHECK15-NEXT: [[TMP166:%.*]] = zext i32 [[ADD63]] to i64 -// CHECK15-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP166]]) -// CHECK15-NEXT: [[TMP167:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l174.region_id, i32 4, i8** [[TMP160]], i8** [[TMP161]], i64* [[TMP162]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK15-NEXT: [[TMP168:%.*]] = icmp ne i32 [[TMP167]], 0 -// CHECK15-NEXT: br i1 [[TMP168]], label [[OMP_OFFLOAD_FAILED64:%.*]], label [[OMP_OFFLOAD_CONT65:%.*]] -// CHECK15: omp_offload.failed64: -// CHECK15-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l174(i64 [[TMP132]], i64 [[TMP134]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] -// CHECK15-NEXT: br label [[OMP_OFFLOAD_CONT65]] -// CHECK15: omp_offload.cont65: -// CHECK15-NEXT: [[TMP169:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 -// CHECK15-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP169]]) +// CHECK15-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0 +// CHECK15-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0 +// CHECK15-NEXT: [[TMP82:%.*]] = load i32, i32* [[N]], align 4 +// CHECK15-NEXT: store i32 [[TMP82]], i32* [[DOTCAPTURE_EXPR_24]], align 4 +// CHECK15-NEXT: [[TMP83:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_24]], align 4 +// CHECK15-NEXT: [[SUB26:%.*]] = sub nsw i32 [[TMP83]], 0 +// CHECK15-NEXT: [[DIV27:%.*]] = sdiv i32 [[SUB26]], 1 +// CHECK15-NEXT: [[SUB28:%.*]] = sub nsw i32 [[DIV27]], 1 +// CHECK15-NEXT: store i32 [[SUB28]], i32* [[DOTCAPTURE_EXPR_25]], align 4 +// CHECK15-NEXT: [[TMP84:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_25]], align 4 +// CHECK15-NEXT: [[ADD29:%.*]] = add nsw i32 [[TMP84]], 1 +// CHECK15-NEXT: [[TMP85:%.*]] = zext i32 [[ADD29]] to i64 +// CHECK15-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP85]]) +// CHECK15-NEXT: [[TMP86:%.*]] = call i32 
@__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l164.region_id, i32 4, i8** [[TMP80]], i8** [[TMP81]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK15-NEXT: [[TMP87:%.*]] = icmp ne i32 [[TMP86]], 0 +// CHECK15-NEXT: br i1 [[TMP87]], label [[OMP_OFFLOAD_FAILED30:%.*]], label [[OMP_OFFLOAD_CONT31:%.*]] +// CHECK15: omp_offload.failed30: +// CHECK15-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l164(i64 [[TMP56]], i64 [[TMP58]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] +// CHECK15-NEXT: br label [[OMP_OFFLOAD_CONT31]] +// CHECK15: omp_offload.cont31: +// CHECK15-NEXT: [[TMP88:%.*]] = load i32, i32* [[N]], align 4 +// CHECK15-NEXT: [[CONV33:%.*]] = bitcast i64* [[N_CASTED32]] to i32* +// CHECK15-NEXT: store i32 [[TMP88]], i32* [[CONV33]], align 4 +// CHECK15-NEXT: [[TMP89:%.*]] = load i64, i64* [[N_CASTED32]], align 8 +// CHECK15-NEXT: [[TMP90:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK15-NEXT: [[TMP91:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 0 +// CHECK15-NEXT: [[TMP92:%.*]] = bitcast i8** [[TMP91]] to i64* +// CHECK15-NEXT: store i64 [[TMP89]], i64* [[TMP92]], align 8 +// CHECK15-NEXT: [[TMP93:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS35]], i32 0, i32 0 +// CHECK15-NEXT: [[TMP94:%.*]] = bitcast i8** [[TMP93]] to i64* +// CHECK15-NEXT: store i64 [[TMP89]], i64* [[TMP94]], align 8 +// CHECK15-NEXT: [[TMP95:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS36]], i64 0, i64 0 +// CHECK15-NEXT: store i8* null, i8** [[TMP95]], align 8 +// CHECK15-NEXT: [[TMP96:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 1 +// CHECK15-NEXT: [[TMP97:%.*]] = bitcast i8** [[TMP96]] to i64* +// CHECK15-NEXT: store i64 [[TMP1]], i64* [[TMP97]], align 8 +// CHECK15-NEXT: [[TMP98:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS35]], i32 0, i32 1 +// CHECK15-NEXT: [[TMP99:%.*]] = bitcast i8** [[TMP98]] to i64* +// CHECK15-NEXT: store i64 [[TMP1]], i64* [[TMP99]], align 8 +// CHECK15-NEXT: [[TMP100:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS36]], i64 0, i64 1 +// CHECK15-NEXT: store i8* null, i8** [[TMP100]], align 8 +// CHECK15-NEXT: [[TMP101:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 2 +// CHECK15-NEXT: [[TMP102:%.*]] = bitcast i8** [[TMP101]] to i32** +// CHECK15-NEXT: store i32* [[VLA]], i32** [[TMP102]], align 8 +// CHECK15-NEXT: [[TMP103:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS35]], i32 0, i32 2 +// CHECK15-NEXT: [[TMP104:%.*]] = bitcast i8** [[TMP103]] to i32** +// CHECK15-NEXT: store i32* [[VLA]], i32** [[TMP104]], align 8 +// CHECK15-NEXT: store i64 [[TMP90]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.12, i32 0, i32 2), align 8 +// CHECK15-NEXT: [[TMP105:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS36]], i64 0, i64 2 +// CHECK15-NEXT: store i8* null, i8** [[TMP105]], align 8 +// CHECK15-NEXT: [[TMP106:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 0 +// CHECK15-NEXT: [[TMP107:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS35]], i32 0, i32 0 +// CHECK15-NEXT: [[TMP108:%.*]] = 
load i32, i32* [[N]], align 4 +// CHECK15-NEXT: store i32 [[TMP108]], i32* [[DOTCAPTURE_EXPR_38]], align 4 +// CHECK15-NEXT: [[TMP109:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_38]], align 4 +// CHECK15-NEXT: [[SUB40:%.*]] = sub nsw i32 [[TMP109]], 0 +// CHECK15-NEXT: [[DIV41:%.*]] = sdiv i32 [[SUB40]], 1 +// CHECK15-NEXT: [[SUB42:%.*]] = sub nsw i32 [[DIV41]], 1 +// CHECK15-NEXT: store i32 [[SUB42]], i32* [[DOTCAPTURE_EXPR_39]], align 4 +// CHECK15-NEXT: [[TMP110:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_39]], align 4 +// CHECK15-NEXT: [[ADD43:%.*]] = add nsw i32 [[TMP110]], 1 +// CHECK15-NEXT: [[TMP111:%.*]] = zext i32 [[ADD43]] to i64 +// CHECK15-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP111]]) +// CHECK15-NEXT: [[TMP112:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l169.region_id, i32 3, i8** [[TMP106]], i8** [[TMP107]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK15-NEXT: [[TMP113:%.*]] = icmp ne i32 [[TMP112]], 0 +// CHECK15-NEXT: br i1 [[TMP113]], label [[OMP_OFFLOAD_FAILED44:%.*]], label [[OMP_OFFLOAD_CONT45:%.*]] +// CHECK15: omp_offload.failed44: +// CHECK15-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l169(i64 [[TMP89]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] +// CHECK15-NEXT: br label [[OMP_OFFLOAD_CONT45]] +// CHECK15: omp_offload.cont45: +// CHECK15-NEXT: [[TMP114:%.*]] = load i32, i32* [[M]], align 4 +// CHECK15-NEXT: [[CONV47:%.*]] = bitcast i64* [[M_CASTED46]] to i32* +// CHECK15-NEXT: store i32 [[TMP114]], i32* [[CONV47]], align 4 +// CHECK15-NEXT: [[TMP115:%.*]] = load i64, i64* [[M_CASTED46]], align 8 +// CHECK15-NEXT: [[TMP116:%.*]] = load i32, i32* [[N]], align 4 +// CHECK15-NEXT: [[CONV49:%.*]] = bitcast i64* [[N_CASTED48]] to i32* +// CHECK15-NEXT: store i32 [[TMP116]], i32* [[CONV49]], align 4 +// CHECK15-NEXT: [[TMP117:%.*]] = load i64, i64* [[N_CASTED48]], align 8 +// CHECK15-NEXT: [[TMP118:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK15-NEXT: [[TMP119:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 0 +// CHECK15-NEXT: [[TMP120:%.*]] = bitcast i8** [[TMP119]] to i64* +// CHECK15-NEXT: store i64 [[TMP115]], i64* [[TMP120]], align 8 +// CHECK15-NEXT: [[TMP121:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 0 +// CHECK15-NEXT: [[TMP122:%.*]] = bitcast i8** [[TMP121]] to i64* +// CHECK15-NEXT: store i64 [[TMP115]], i64* [[TMP122]], align 8 +// CHECK15-NEXT: [[TMP123:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 0 +// CHECK15-NEXT: store i8* null, i8** [[TMP123]], align 8 +// CHECK15-NEXT: [[TMP124:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 1 +// CHECK15-NEXT: [[TMP125:%.*]] = bitcast i8** [[TMP124]] to i64* +// CHECK15-NEXT: store i64 [[TMP117]], i64* [[TMP125]], align 8 +// CHECK15-NEXT: [[TMP126:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 1 +// CHECK15-NEXT: [[TMP127:%.*]] = bitcast i8** [[TMP126]] to i64* +// CHECK15-NEXT: store i64 [[TMP117]], i64* [[TMP127]], align 8 +// CHECK15-NEXT: [[TMP128:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 1 +// CHECK15-NEXT: store i8* null, i8** 
[[TMP128]], align 8 +// CHECK15-NEXT: [[TMP129:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 2 +// CHECK15-NEXT: [[TMP130:%.*]] = bitcast i8** [[TMP129]] to i64* +// CHECK15-NEXT: store i64 [[TMP1]], i64* [[TMP130]], align 8 +// CHECK15-NEXT: [[TMP131:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 2 +// CHECK15-NEXT: [[TMP132:%.*]] = bitcast i8** [[TMP131]] to i64* +// CHECK15-NEXT: store i64 [[TMP1]], i64* [[TMP132]], align 8 +// CHECK15-NEXT: [[TMP133:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 2 +// CHECK15-NEXT: store i8* null, i8** [[TMP133]], align 8 +// CHECK15-NEXT: [[TMP134:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 3 +// CHECK15-NEXT: [[TMP135:%.*]] = bitcast i8** [[TMP134]] to i32** +// CHECK15-NEXT: store i32* [[VLA]], i32** [[TMP135]], align 8 +// CHECK15-NEXT: [[TMP136:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 3 +// CHECK15-NEXT: [[TMP137:%.*]] = bitcast i8** [[TMP136]] to i32** +// CHECK15-NEXT: store i32* [[VLA]], i32** [[TMP137]], align 8 +// CHECK15-NEXT: store i64 [[TMP118]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.16, i32 0, i32 3), align 8 +// CHECK15-NEXT: [[TMP138:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 3 +// CHECK15-NEXT: store i8* null, i8** [[TMP138]], align 8 +// CHECK15-NEXT: [[TMP139:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 0 +// CHECK15-NEXT: [[TMP140:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 0 +// CHECK15-NEXT: [[TMP141:%.*]] = load i32, i32* [[N]], align 4 +// CHECK15-NEXT: store i32 [[TMP141]], i32* [[DOTCAPTURE_EXPR_54]], align 4 +// CHECK15-NEXT: [[TMP142:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_54]], align 4 +// CHECK15-NEXT: [[SUB56:%.*]] = sub nsw i32 [[TMP142]], 0 +// CHECK15-NEXT: [[DIV57:%.*]] = sdiv i32 [[SUB56]], 1 +// CHECK15-NEXT: [[SUB58:%.*]] = sub nsw i32 [[DIV57]], 1 +// CHECK15-NEXT: store i32 [[SUB58]], i32* [[DOTCAPTURE_EXPR_55]], align 4 +// CHECK15-NEXT: [[TMP143:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_55]], align 4 +// CHECK15-NEXT: [[ADD59:%.*]] = add nsw i32 [[TMP143]], 1 +// CHECK15-NEXT: [[TMP144:%.*]] = zext i32 [[ADD59]] to i64 +// CHECK15-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP144]]) +// CHECK15-NEXT: [[TMP145:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l174.region_id, i32 4, i8** [[TMP139]], i8** [[TMP140]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.16, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK15-NEXT: [[TMP146:%.*]] = icmp ne i32 [[TMP145]], 0 +// CHECK15-NEXT: br i1 [[TMP146]], label [[OMP_OFFLOAD_FAILED60:%.*]], label [[OMP_OFFLOAD_CONT61:%.*]] +// CHECK15: omp_offload.failed60: +// CHECK15-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l174(i64 [[TMP115]], i64 [[TMP117]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] +// CHECK15-NEXT: br label [[OMP_OFFLOAD_CONT61]] +// CHECK15: omp_offload.cont61: +// CHECK15-NEXT: [[TMP147:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 +// CHECK15-NEXT: [[CALL:%.*]] = call noundef signext i32 
@_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP147]]) // CHECK15-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK15-NEXT: [[TMP170:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK15-NEXT: call void @llvm.stackrestore(i8* [[TMP170]]) -// CHECK15-NEXT: [[TMP171:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK15-NEXT: ret i32 [[TMP171]] +// CHECK15-NEXT: [[TMP148:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK15-NEXT: call void @llvm.stackrestore(i8* [[TMP148]]) +// CHECK15-NEXT: [[TMP149:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK15-NEXT: ret i32 [[TMP149]] // // // CHECK15-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l154 @@ -14945,11 +14828,11 @@ // CHECK15-NEXT: [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK15-NEXT: store i32 [[TMP3]], i32* [[CONV2]], align 4 // CHECK15-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i64, i32*, i64)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i32* [[CONV1]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP4]]) +// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i64, i32*, i64)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32* [[CONV1]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP4]]) // CHECK15-NEXT: ret void // // -// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..5 +// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..6 // CHECK15-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK15-NEXT: entry: // CHECK15-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -15031,7 +14914,7 @@ // CHECK15-NEXT: [[CONV7:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK15-NEXT: store i32 [[TMP21]], i32* [[CONV7]], align 4, !llvm.access.group !28 // CHECK15-NEXT: [[TMP22:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !28 -// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64, i32*, i64)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], i64 [[TMP1]], i32* [[TMP2]], i64 [[TMP22]]), !llvm.access.group !28 +// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64, i32*, i64)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], i64 [[TMP1]], i32* [[TMP2]], i64 [[TMP22]]), !llvm.access.group !28 // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK15: omp.inner.for.inc: // CHECK15-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !28 @@ -15085,7 +14968,7 @@ // CHECK15-NEXT: ret void // // -// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..6 +// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..7 // CHECK15-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK15-NEXT: entry: // CHECK15-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -15217,11 +15100,11 @@ // CHECK15-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32* // CHECK15-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8 // CHECK15-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8 -// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i64, i32*)* @.omp_outlined..8 to void (i32*, i32*, ...)*), i32* [[CONV]], i64 [[TMP0]], i32* [[TMP1]]) +// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i64, i32*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), i32* [[CONV]], i64 [[TMP0]], i32* [[TMP1]]) // CHECK15-NEXT: ret void // // -// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..8 +// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..10 // CHECK15-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] { // CHECK15-NEXT: entry: // CHECK15-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -15293,7 +15176,7 @@ // CHECK15-NEXT: [[TMP17:%.*]] = zext i32 [[TMP16]] to i64 // CHECK15-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !34 // CHECK15-NEXT: [[TMP19:%.*]] = zext i32 [[TMP18]] to i64 -// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64, i32*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), i64 [[TMP17]], i64 [[TMP19]], i32* [[TMP0]], i64 [[TMP1]], i32* [[TMP2]]), !llvm.access.group !34 +// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64, i32*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP17]], i64 [[TMP19]], i32* [[TMP0]], i64 [[TMP1]], i32* [[TMP2]]), !llvm.access.group !34 // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK15: omp.inner.for.inc: // CHECK15-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !34 @@ -15324,7 +15207,7 @@ // CHECK15-NEXT: ret void // // -// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..9 +// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..11 // CHECK15-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] { // CHECK15-NEXT: entry: // CHECK15-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -15460,11 +15343,11 @@ // CHECK15-NEXT: [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK15-NEXT: store i32 [[TMP3]], i32* [[CONV2]], align 4 // CHECK15-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i64, i32*, i64)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32* [[CONV1]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP4]]) +// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i64, i32*, i64)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i32* [[CONV1]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP4]]) // CHECK15-NEXT: ret void // // -// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..14 // CHECK15-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK15-NEXT: entry: // CHECK15-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -15544,7 +15427,7 @@ // CHECK15-NEXT: [[CONV7:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK15-NEXT: store i32 [[TMP20]], i32* [[CONV7]], align 4, !llvm.access.group !40 // CHECK15-NEXT: [[TMP21:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !40 -// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64, i32*, i64)* @.omp_outlined..12 to void (i32*, i32*, ...)*), i64 [[TMP17]], i64 [[TMP19]], i32* [[TMP0]], i64 [[TMP1]], i32* [[TMP2]], i64 [[TMP21]]), !llvm.access.group !40 +// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64, i32*, i64)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i64 [[TMP17]], i64 [[TMP19]], i32* [[TMP0]], i64 [[TMP1]], i32* [[TMP2]], i64 [[TMP21]]), !llvm.access.group !40 // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK15: omp.inner.for.inc: // CHECK15-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !40 @@ -15575,7 +15458,7 @@ // CHECK15-NEXT: ret void // // -// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..12 +// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..15 // CHECK15-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK15-NEXT: entry: // CHECK15-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -15733,7 +15616,7 @@ // CHECK15-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK15-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK15-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK15-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l122.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK15-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l122.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.20, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.21, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK15-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK15-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK15: omp_offload.failed: @@ -15751,7 +15634,7 @@ // CHECK15-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0 // CHECK15-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0 // CHECK15-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK15-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l127.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.19, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.20, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK15-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l127.region_id, i32 1, i8** [[TMP14]], i8** 
[[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.24, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.25, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK15-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 // CHECK15-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]] // CHECK15: omp_offload.failed5: @@ -15781,7 +15664,7 @@ // CHECK15-NEXT: [[TMP30:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0 // CHECK15-NEXT: [[TMP31:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0 // CHECK15-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK15-NEXT: [[TMP32:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l132.region_id, i32 2, i8** [[TMP30]], i8** [[TMP31]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.23, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.24, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK15-NEXT: [[TMP32:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l132.region_id, i32 2, i8** [[TMP30]], i8** [[TMP31]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.28, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.29, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK15-NEXT: [[TMP33:%.*]] = icmp ne i32 [[TMP32]], 0 // CHECK15-NEXT: br i1 [[TMP33]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]] // CHECK15: omp_offload.failed11: @@ -15799,7 +15682,7 @@ // CHECK15-NEXT: [[TMP39:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 0 // CHECK15-NEXT: [[TMP40:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 0 // CHECK15-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK15-NEXT: [[TMP41:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l137.region_id, i32 1, i8** [[TMP39]], i8** [[TMP40]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.27, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.28, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK15-NEXT: [[TMP41:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l137.region_id, i32 1, i8** [[TMP39]], i8** [[TMP40]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.32, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.33, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK15-NEXT: [[TMP42:%.*]] = icmp ne i32 [[TMP41]], 0 // CHECK15-NEXT: br i1 [[TMP42]], label [[OMP_OFFLOAD_FAILED17:%.*]], label [[OMP_OFFLOAD_CONT18:%.*]] // CHECK15: omp_offload.failed17: @@ -15829,7 +15712,7 @@ // CHECK15-NEXT: [[TMP55:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0 // CHECK15-NEXT: [[TMP56:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0 // 
CHECK15-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK15-NEXT: [[TMP57:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l142.region_id, i32 2, i8** [[TMP55]], i8** [[TMP56]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.31, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.32, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK15-NEXT: [[TMP57:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l142.region_id, i32 2, i8** [[TMP55]], i8** [[TMP56]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.36, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.37, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK15-NEXT: [[TMP58:%.*]] = icmp ne i32 [[TMP57]], 0 // CHECK15-NEXT: br i1 [[TMP58]], label [[OMP_OFFLOAD_FAILED25:%.*]], label [[OMP_OFFLOAD_CONT26:%.*]] // CHECK15: omp_offload.failed25: @@ -15845,11 +15728,11 @@ // CHECK15-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK15-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 // CHECK15-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 -// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK15-NEXT: ret void // // -// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..14 +// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..18 // CHECK15-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK15-NEXT: entry: // CHECK15-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -15897,7 +15780,7 @@ // CHECK15-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 // CHECK15-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !46 // CHECK15-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]), !llvm.access.group !46 +// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..19 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]), !llvm.access.group !46 // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK15: omp.inner.for.inc: // CHECK15-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !46 @@ -15919,7 +15802,7 @@ // CHECK15-NEXT: ret void // // -// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..15 +// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..19 // CHECK15-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK15-NEXT: entry: // CHECK15-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -16009,11 +15892,11 @@ // CHECK15-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK15-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 // CHECK15-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 -// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..17 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..22 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK15-NEXT: ret void // // -// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..17 +// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..22 // CHECK15-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK15-NEXT: entry: // CHECK15-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -16061,7 +15944,7 @@ // CHECK15-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 // CHECK15-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !52 // CHECK15-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]), !llvm.access.group !52 +// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..23 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]), !llvm.access.group !52 // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK15: omp.inner.for.inc: // CHECK15-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !52 @@ -16083,7 +15966,7 @@ // CHECK15-NEXT: ret void // // -// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..18 +// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..23 // CHECK15-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK15-NEXT: entry: // CHECK15-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -16184,11 +16067,11 @@ // CHECK15-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK15-NEXT: store i32 [[TMP2]], i32* [[CONV1]], align 4 // CHECK15-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..21 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP3]]) +// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..26 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP3]]) // CHECK15-NEXT: ret void // // -// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..21 +// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..26 // CHECK15-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK15-NEXT: entry: // CHECK15-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -16244,7 +16127,7 @@ // CHECK15-NEXT: [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK15-NEXT: store i32 [[TMP12]], i32* [[CONV2]], align 4, !llvm.access.group !58 // CHECK15-NEXT: [[TMP13:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !58 -// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..22 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]]), !llvm.access.group !58 +// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..27 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]]), !llvm.access.group !58 // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK15: omp.inner.for.inc: // CHECK15-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !58 @@ -16266,7 +16149,7 @@ // CHECK15-NEXT: ret void // // -// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..22 +// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..27 // CHECK15-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK15-NEXT: entry: // CHECK15-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -16381,11 +16264,11 @@ // CHECK15-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK15-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 // CHECK15-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 -// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..25 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..30 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK15-NEXT: ret void // // -// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..25 +// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..30 // CHECK15-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK15-NEXT: entry: // CHECK15-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -16433,7 +16316,7 @@ // CHECK15-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 // CHECK15-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !64 // CHECK15-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..26 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]), !llvm.access.group !64 +// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..31 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]), !llvm.access.group !64 // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK15: omp.inner.for.inc: // CHECK15-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !64 @@ -16455,7 +16338,7 @@ // CHECK15-NEXT: ret void // // -// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..26 +// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..31 // CHECK15-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK15-NEXT: entry: // CHECK15-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -16554,11 +16437,11 @@ // CHECK15-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK15-NEXT: store i32 [[TMP2]], i32* [[CONV1]], align 4 // CHECK15-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..29 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP3]]) +// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..34 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP3]]) // CHECK15-NEXT: ret void // // -// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..29 +// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..34 // CHECK15-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK15-NEXT: entry: // CHECK15-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -16614,7 +16497,7 @@ // CHECK15-NEXT: [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK15-NEXT: store i32 [[TMP12]], i32* [[CONV2]], align 4, !llvm.access.group !70 // CHECK15-NEXT: [[TMP13:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !70 -// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..30 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]]), !llvm.access.group !70 +// CHECK15-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..35 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]]), !llvm.access.group !70 // CHECK15-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK15: omp.inner.for.inc: // CHECK15-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !70 @@ -16636,7 +16519,7 @@ // CHECK15-NEXT: ret void // // -// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..30 +// CHECK15-LABEL: define {{[^@]+}}@.omp_outlined..35 // CHECK15-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK15-NEXT: entry: // CHECK15-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -16743,7 +16626,6 @@ // CHECK16-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK16-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK16-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8 -// CHECK16-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 8 // CHECK16-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK16-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK16-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -16751,36 +16633,32 @@ // CHECK16-NEXT: [[DOTOFFLOAD_BASEPTRS5:%.*]] = alloca [3 x i8*], align 8 // CHECK16-NEXT: [[DOTOFFLOAD_PTRS6:%.*]] = alloca [3 x i8*], align 8 // CHECK16-NEXT: [[DOTOFFLOAD_MAPPERS7:%.*]] = alloca [3 x i8*], align 8 -// CHECK16-NEXT: [[DOTOFFLOAD_SIZES8:%.*]] = alloca [3 x i64], align 8 -// CHECK16-NEXT: [[_TMP9:%.*]] = alloca i32, align 4 +// CHECK16-NEXT: [[_TMP8:%.*]] = alloca i32, align 4 +// CHECK16-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4 // CHECK16-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4 -// CHECK16-NEXT: [[DOTCAPTURE_EXPR_11:%.*]] = alloca i32, align 4 // CHECK16-NEXT: [[M_CASTED:%.*]] = alloca i64, align 8 -// CHECK16-NEXT: [[N_CASTED19:%.*]] = alloca i64, align 8 -// CHECK16-NEXT: [[DOTOFFLOAD_BASEPTRS21:%.*]] = alloca [4 x i8*], align 8 -// CHECK16-NEXT: [[DOTOFFLOAD_PTRS22:%.*]] = alloca [4 x i8*], align 8 -// CHECK16-NEXT: [[DOTOFFLOAD_MAPPERS23:%.*]] = alloca [4 x i8*], align 8 -// CHECK16-NEXT: [[DOTOFFLOAD_SIZES24:%.*]] = alloca [4 x i64], align 8 -// CHECK16-NEXT: [[_TMP25:%.*]] = alloca i32, align 4 -// CHECK16-NEXT: [[DOTCAPTURE_EXPR_26:%.*]] = alloca i32, align 4 -// CHECK16-NEXT: [[DOTCAPTURE_EXPR_27:%.*]] = alloca i32, align 4 -// CHECK16-NEXT: [[N_CASTED34:%.*]] = alloca i64, align 8 -// CHECK16-NEXT: [[DOTOFFLOAD_BASEPTRS36:%.*]] = alloca [3 x i8*], align 8 -// CHECK16-NEXT: [[DOTOFFLOAD_PTRS37:%.*]] = alloca [3 x i8*], align 8 -// CHECK16-NEXT: [[DOTOFFLOAD_MAPPERS38:%.*]] = alloca [3 x i8*], align 8 -// CHECK16-NEXT: [[DOTOFFLOAD_SIZES39:%.*]] = alloca [3 x i64], align 8 -// CHECK16-NEXT: [[_TMP40:%.*]] = alloca i32, align 4 -// CHECK16-NEXT: [[DOTCAPTURE_EXPR_41:%.*]] = alloca i32, align 4 -// CHECK16-NEXT: [[DOTCAPTURE_EXPR_42:%.*]] = alloca i32, align 4 -// CHECK16-NEXT: [[M_CASTED49:%.*]] = alloca i64, align 8 -// CHECK16-NEXT: [[N_CASTED51:%.*]] = alloca i64, align 8 -// CHECK16-NEXT: [[DOTOFFLOAD_BASEPTRS53:%.*]] = alloca [4 x i8*], align 8 -// CHECK16-NEXT: [[DOTOFFLOAD_PTRS54:%.*]] = alloca [4 x i8*], align 8 -// CHECK16-NEXT: 
[[DOTOFFLOAD_MAPPERS55:%.*]] = alloca [4 x i8*], align 8 -// CHECK16-NEXT: [[DOTOFFLOAD_SIZES56:%.*]] = alloca [4 x i64], align 8 -// CHECK16-NEXT: [[_TMP57:%.*]] = alloca i32, align 4 -// CHECK16-NEXT: [[DOTCAPTURE_EXPR_58:%.*]] = alloca i32, align 4 -// CHECK16-NEXT: [[DOTCAPTURE_EXPR_59:%.*]] = alloca i32, align 4 +// CHECK16-NEXT: [[N_CASTED18:%.*]] = alloca i64, align 8 +// CHECK16-NEXT: [[DOTOFFLOAD_BASEPTRS20:%.*]] = alloca [4 x i8*], align 8 +// CHECK16-NEXT: [[DOTOFFLOAD_PTRS21:%.*]] = alloca [4 x i8*], align 8 +// CHECK16-NEXT: [[DOTOFFLOAD_MAPPERS22:%.*]] = alloca [4 x i8*], align 8 +// CHECK16-NEXT: [[_TMP23:%.*]] = alloca i32, align 4 +// CHECK16-NEXT: [[DOTCAPTURE_EXPR_24:%.*]] = alloca i32, align 4 +// CHECK16-NEXT: [[DOTCAPTURE_EXPR_25:%.*]] = alloca i32, align 4 +// CHECK16-NEXT: [[N_CASTED32:%.*]] = alloca i64, align 8 +// CHECK16-NEXT: [[DOTOFFLOAD_BASEPTRS34:%.*]] = alloca [3 x i8*], align 8 +// CHECK16-NEXT: [[DOTOFFLOAD_PTRS35:%.*]] = alloca [3 x i8*], align 8 +// CHECK16-NEXT: [[DOTOFFLOAD_MAPPERS36:%.*]] = alloca [3 x i8*], align 8 +// CHECK16-NEXT: [[_TMP37:%.*]] = alloca i32, align 4 +// CHECK16-NEXT: [[DOTCAPTURE_EXPR_38:%.*]] = alloca i32, align 4 +// CHECK16-NEXT: [[DOTCAPTURE_EXPR_39:%.*]] = alloca i32, align 4 +// CHECK16-NEXT: [[M_CASTED46:%.*]] = alloca i64, align 8 +// CHECK16-NEXT: [[N_CASTED48:%.*]] = alloca i64, align 8 +// CHECK16-NEXT: [[DOTOFFLOAD_BASEPTRS50:%.*]] = alloca [4 x i8*], align 8 +// CHECK16-NEXT: [[DOTOFFLOAD_PTRS51:%.*]] = alloca [4 x i8*], align 8 +// CHECK16-NEXT: [[DOTOFFLOAD_MAPPERS52:%.*]] = alloca [4 x i8*], align 8 +// CHECK16-NEXT: [[_TMP53:%.*]] = alloca i32, align 4 +// CHECK16-NEXT: [[DOTCAPTURE_EXPR_54:%.*]] = alloca i32, align 4 +// CHECK16-NEXT: [[DOTCAPTURE_EXPR_55:%.*]] = alloca i32, align 4 // CHECK16-NEXT: store i32 0, i32* [[RETVAL]], align 4 // CHECK16-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 // CHECK16-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 8 @@ -16803,310 +16681,276 @@ // CHECK16-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK16-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i64* // CHECK16-NEXT: store i64 [[TMP4]], i64* [[TMP9]], align 8 -// CHECK16-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK16-NEXT: store i64 4, i64* [[TMP10]], align 8 -// CHECK16-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK16-NEXT: store i8* null, i8** [[TMP11]], align 8 -// CHECK16-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK16-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64* -// CHECK16-NEXT: store i64 [[TMP1]], i64* [[TMP13]], align 8 -// CHECK16-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK16-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64* -// CHECK16-NEXT: store i64 [[TMP1]], i64* [[TMP15]], align 8 -// CHECK16-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK16-NEXT: store i64 8, i64* [[TMP16]], align 8 -// CHECK16-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK16-NEXT: store i8* null, i8** [[TMP17]], align 8 -// CHECK16-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK16-NEXT: 
[[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK16-NEXT: store i8* null, i8** [[TMP10]], align 8 +// CHECK16-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK16-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i64* +// CHECK16-NEXT: store i64 [[TMP1]], i64* [[TMP12]], align 8 +// CHECK16-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK16-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i64* +// CHECK16-NEXT: store i64 [[TMP1]], i64* [[TMP14]], align 8 +// CHECK16-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK16-NEXT: store i8* null, i8** [[TMP15]], align 8 +// CHECK16-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK16-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** +// CHECK16-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 8 +// CHECK16-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK16-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** // CHECK16-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 8 -// CHECK16-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK16-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** -// CHECK16-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 8 -// CHECK16-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK16-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 8 -// CHECK16-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK16-NEXT: store i8* null, i8** [[TMP23]], align 8 -// CHECK16-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK16-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK16-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK16-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4 -// CHECK16-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK16-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK16-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 +// CHECK16-NEXT: store i64 [[TMP5]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 2), align 8 +// CHECK16-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK16-NEXT: store i8* null, i8** [[TMP20]], align 8 +// CHECK16-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK16-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK16-NEXT: [[TMP23:%.*]] = load i32, i32* [[N]], align 4 +// CHECK16-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK16-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK16-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK16-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK16-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK16-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK16-NEXT: [[TMP29:%.*]] = load i32, i32* 
[[DOTCAPTURE_EXPR_1]], align 4 -// CHECK16-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1 -// CHECK16-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 -// CHECK16-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP30]]) -// CHECK16-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l154.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK16-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 -// CHECK16-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK16-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK16-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], 1 +// CHECK16-NEXT: [[TMP26:%.*]] = zext i32 [[ADD]] to i64 +// CHECK16-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP26]]) +// CHECK16-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l154.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK16-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK16-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK16: omp_offload.failed: // CHECK16-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l154(i64 [[TMP4]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK16-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK16: omp_offload.cont: -// CHECK16-NEXT: [[TMP33:%.*]] = load i32, i32* [[N]], align 4 +// CHECK16-NEXT: [[TMP29:%.*]] = load i32, i32* [[N]], align 4 // CHECK16-NEXT: [[CONV4:%.*]] = bitcast i64* [[N_CASTED3]] to i32* -// CHECK16-NEXT: store i32 [[TMP33]], i32* [[CONV4]], align 4 -// CHECK16-NEXT: [[TMP34:%.*]] = load i64, i64* [[N_CASTED3]], align 8 -// CHECK16-NEXT: [[TMP35:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK16-NEXT: [[TMP36:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 -// CHECK16-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i64* -// CHECK16-NEXT: store i64 [[TMP34]], i64* [[TMP37]], align 8 -// CHECK16-NEXT: [[TMP38:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 -// CHECK16-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i64* -// CHECK16-NEXT: store i64 [[TMP34]], i64* [[TMP39]], align 8 -// CHECK16-NEXT: [[TMP40:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 0 -// CHECK16-NEXT: store i64 4, i64* [[TMP40]], align 8 -// CHECK16-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 0 +// CHECK16-NEXT: store i32 [[TMP29]], i32* [[CONV4]], align 4 +// CHECK16-NEXT: [[TMP30:%.*]] = load i64, i64* [[N_CASTED3]], align 8 +// CHECK16-NEXT: [[TMP31:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK16-NEXT: [[TMP32:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 +// CHECK16-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i64* +// CHECK16-NEXT: store i64 [[TMP30]], i64* [[TMP33]], align 8 +// CHECK16-NEXT: 
[[TMP34:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 +// CHECK16-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i64* +// CHECK16-NEXT: store i64 [[TMP30]], i64* [[TMP35]], align 8 +// CHECK16-NEXT: [[TMP36:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 0 +// CHECK16-NEXT: store i8* null, i8** [[TMP36]], align 8 +// CHECK16-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1 +// CHECK16-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i64* +// CHECK16-NEXT: store i64 [[TMP1]], i64* [[TMP38]], align 8 +// CHECK16-NEXT: [[TMP39:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 1 +// CHECK16-NEXT: [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i64* +// CHECK16-NEXT: store i64 [[TMP1]], i64* [[TMP40]], align 8 +// CHECK16-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 1 // CHECK16-NEXT: store i8* null, i8** [[TMP41]], align 8 -// CHECK16-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1 -// CHECK16-NEXT: [[TMP43:%.*]] = bitcast i8** [[TMP42]] to i64* -// CHECK16-NEXT: store i64 [[TMP1]], i64* [[TMP43]], align 8 -// CHECK16-NEXT: [[TMP44:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 1 -// CHECK16-NEXT: [[TMP45:%.*]] = bitcast i8** [[TMP44]] to i64* -// CHECK16-NEXT: store i64 [[TMP1]], i64* [[TMP45]], align 8 -// CHECK16-NEXT: [[TMP46:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 1 -// CHECK16-NEXT: store i64 8, i64* [[TMP46]], align 8 -// CHECK16-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 1 -// CHECK16-NEXT: store i8* null, i8** [[TMP47]], align 8 -// CHECK16-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 2 -// CHECK16-NEXT: [[TMP49:%.*]] = bitcast i8** [[TMP48]] to i32** -// CHECK16-NEXT: store i32* [[VLA]], i32** [[TMP49]], align 8 -// CHECK16-NEXT: [[TMP50:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 2 -// CHECK16-NEXT: [[TMP51:%.*]] = bitcast i8** [[TMP50]] to i32** -// CHECK16-NEXT: store i32* [[VLA]], i32** [[TMP51]], align 8 -// CHECK16-NEXT: [[TMP52:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 2 -// CHECK16-NEXT: store i64 [[TMP35]], i64* [[TMP52]], align 8 -// CHECK16-NEXT: [[TMP53:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 2 -// CHECK16-NEXT: store i8* null, i8** [[TMP53]], align 8 -// CHECK16-NEXT: [[TMP54:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 -// CHECK16-NEXT: [[TMP55:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 -// CHECK16-NEXT: [[TMP56:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 0 +// CHECK16-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 2 +// CHECK16-NEXT: [[TMP43:%.*]] = bitcast i8** [[TMP42]] to i32** +// CHECK16-NEXT: store i32* [[VLA]], i32** [[TMP43]], align 8 +// CHECK16-NEXT: [[TMP44:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 2 +// CHECK16-NEXT: [[TMP45:%.*]] = bitcast i8** [[TMP44]] to i32** +// CHECK16-NEXT: store i32* [[VLA]], i32** 
[[TMP45]], align 8 +// CHECK16-NEXT: store i64 [[TMP31]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 2), align 8 +// CHECK16-NEXT: [[TMP46:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 2 +// CHECK16-NEXT: store i8* null, i8** [[TMP46]], align 8 +// CHECK16-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 +// CHECK16-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 +// CHECK16-NEXT: [[TMP49:%.*]] = load i32, i32* [[N]], align 4 +// CHECK16-NEXT: store i32 [[TMP49]], i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK16-NEXT: [[TMP50:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK16-NEXT: [[SUB11:%.*]] = sub nsw i32 [[TMP50]], 0 +// CHECK16-NEXT: [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1 +// CHECK16-NEXT: [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1 +// CHECK16-NEXT: store i32 [[SUB13]], i32* [[DOTCAPTURE_EXPR_10]], align 4 +// CHECK16-NEXT: [[TMP51:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 +// CHECK16-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP51]], 1 +// CHECK16-NEXT: [[TMP52:%.*]] = zext i32 [[ADD14]] to i64 +// CHECK16-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP52]]) +// CHECK16-NEXT: [[TMP53:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159.region_id, i32 3, i8** [[TMP47]], i8** [[TMP48]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK16-NEXT: [[TMP54:%.*]] = icmp ne i32 [[TMP53]], 0 +// CHECK16-NEXT: br i1 [[TMP54]], label [[OMP_OFFLOAD_FAILED15:%.*]], label [[OMP_OFFLOAD_CONT16:%.*]] +// CHECK16: omp_offload.failed15: +// CHECK16-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159(i64 [[TMP30]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] +// CHECK16-NEXT: br label [[OMP_OFFLOAD_CONT16]] +// CHECK16: omp_offload.cont16: +// CHECK16-NEXT: [[TMP55:%.*]] = load i32, i32* [[M]], align 4 +// CHECK16-NEXT: [[CONV17:%.*]] = bitcast i64* [[M_CASTED]] to i32* +// CHECK16-NEXT: store i32 [[TMP55]], i32* [[CONV17]], align 4 +// CHECK16-NEXT: [[TMP56:%.*]] = load i64, i64* [[M_CASTED]], align 8 // CHECK16-NEXT: [[TMP57:%.*]] = load i32, i32* [[N]], align 4 -// CHECK16-NEXT: store i32 [[TMP57]], i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK16-NEXT: [[TMP58:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK16-NEXT: [[SUB12:%.*]] = sub nsw i32 [[TMP58]], 0 -// CHECK16-NEXT: [[DIV13:%.*]] = sdiv i32 [[SUB12]], 1 -// CHECK16-NEXT: [[SUB14:%.*]] = sub nsw i32 [[DIV13]], 1 -// CHECK16-NEXT: store i32 [[SUB14]], i32* [[DOTCAPTURE_EXPR_11]], align 4 -// CHECK16-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_11]], align 4 -// CHECK16-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP59]], 1 -// CHECK16-NEXT: [[TMP60:%.*]] = zext i32 [[ADD15]] to i64 -// CHECK16-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP60]]) -// CHECK16-NEXT: [[TMP61:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159.region_id, i32 3, i8** [[TMP54]], i8** [[TMP55]], i64* [[TMP56]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), 
i8** null, i8** null, i32 0, i32 0) -// CHECK16-NEXT: [[TMP62:%.*]] = icmp ne i32 [[TMP61]], 0 -// CHECK16-NEXT: br i1 [[TMP62]], label [[OMP_OFFLOAD_FAILED16:%.*]], label [[OMP_OFFLOAD_CONT17:%.*]] -// CHECK16: omp_offload.failed16: -// CHECK16-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159(i64 [[TMP34]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] -// CHECK16-NEXT: br label [[OMP_OFFLOAD_CONT17]] -// CHECK16: omp_offload.cont17: -// CHECK16-NEXT: [[TMP63:%.*]] = load i32, i32* [[M]], align 4 -// CHECK16-NEXT: [[CONV18:%.*]] = bitcast i64* [[M_CASTED]] to i32* -// CHECK16-NEXT: store i32 [[TMP63]], i32* [[CONV18]], align 4 -// CHECK16-NEXT: [[TMP64:%.*]] = load i64, i64* [[M_CASTED]], align 8 -// CHECK16-NEXT: [[TMP65:%.*]] = load i32, i32* [[N]], align 4 -// CHECK16-NEXT: [[CONV20:%.*]] = bitcast i64* [[N_CASTED19]] to i32* -// CHECK16-NEXT: store i32 [[TMP65]], i32* [[CONV20]], align 4 -// CHECK16-NEXT: [[TMP66:%.*]] = load i64, i64* [[N_CASTED19]], align 8 -// CHECK16-NEXT: [[TMP67:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK16-NEXT: [[TMP68:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0 -// CHECK16-NEXT: [[TMP69:%.*]] = bitcast i8** [[TMP68]] to i64* -// CHECK16-NEXT: store i64 [[TMP64]], i64* [[TMP69]], align 8 -// CHECK16-NEXT: [[TMP70:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0 +// CHECK16-NEXT: [[CONV19:%.*]] = bitcast i64* [[N_CASTED18]] to i32* +// CHECK16-NEXT: store i32 [[TMP57]], i32* [[CONV19]], align 4 +// CHECK16-NEXT: [[TMP58:%.*]] = load i64, i64* [[N_CASTED18]], align 8 +// CHECK16-NEXT: [[TMP59:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK16-NEXT: [[TMP60:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0 +// CHECK16-NEXT: [[TMP61:%.*]] = bitcast i8** [[TMP60]] to i64* +// CHECK16-NEXT: store i64 [[TMP56]], i64* [[TMP61]], align 8 +// CHECK16-NEXT: [[TMP62:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0 +// CHECK16-NEXT: [[TMP63:%.*]] = bitcast i8** [[TMP62]] to i64* +// CHECK16-NEXT: store i64 [[TMP56]], i64* [[TMP63]], align 8 +// CHECK16-NEXT: [[TMP64:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 0 +// CHECK16-NEXT: store i8* null, i8** [[TMP64]], align 8 +// CHECK16-NEXT: [[TMP65:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 1 +// CHECK16-NEXT: [[TMP66:%.*]] = bitcast i8** [[TMP65]] to i64* +// CHECK16-NEXT: store i64 [[TMP58]], i64* [[TMP66]], align 8 +// CHECK16-NEXT: [[TMP67:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 1 +// CHECK16-NEXT: [[TMP68:%.*]] = bitcast i8** [[TMP67]] to i64* +// CHECK16-NEXT: store i64 [[TMP58]], i64* [[TMP68]], align 8 +// CHECK16-NEXT: [[TMP69:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 1 +// CHECK16-NEXT: store i8* null, i8** [[TMP69]], align 8 +// CHECK16-NEXT: [[TMP70:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 2 // CHECK16-NEXT: [[TMP71:%.*]] = bitcast i8** [[TMP70]] to i64* -// CHECK16-NEXT: store i64 [[TMP64]], i64* [[TMP71]], align 8 -// CHECK16-NEXT: [[TMP72:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES24]], i32 0, i32 0 -// CHECK16-NEXT: store i64 4, i64* [[TMP72]], align 8 -// CHECK16-NEXT: [[TMP73:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 0 -// 
CHECK16-NEXT: store i8* null, i8** [[TMP73]], align 8 -// CHECK16-NEXT: [[TMP74:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 1 -// CHECK16-NEXT: [[TMP75:%.*]] = bitcast i8** [[TMP74]] to i64* -// CHECK16-NEXT: store i64 [[TMP66]], i64* [[TMP75]], align 8 -// CHECK16-NEXT: [[TMP76:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 1 -// CHECK16-NEXT: [[TMP77:%.*]] = bitcast i8** [[TMP76]] to i64* -// CHECK16-NEXT: store i64 [[TMP66]], i64* [[TMP77]], align 8 -// CHECK16-NEXT: [[TMP78:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES24]], i32 0, i32 1 -// CHECK16-NEXT: store i64 4, i64* [[TMP78]], align 8 -// CHECK16-NEXT: [[TMP79:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 1 +// CHECK16-NEXT: store i64 [[TMP1]], i64* [[TMP71]], align 8 +// CHECK16-NEXT: [[TMP72:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 2 +// CHECK16-NEXT: [[TMP73:%.*]] = bitcast i8** [[TMP72]] to i64* +// CHECK16-NEXT: store i64 [[TMP1]], i64* [[TMP73]], align 8 +// CHECK16-NEXT: [[TMP74:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 2 +// CHECK16-NEXT: store i8* null, i8** [[TMP74]], align 8 +// CHECK16-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 3 +// CHECK16-NEXT: [[TMP76:%.*]] = bitcast i8** [[TMP75]] to i32** +// CHECK16-NEXT: store i32* [[VLA]], i32** [[TMP76]], align 8 +// CHECK16-NEXT: [[TMP77:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 3 +// CHECK16-NEXT: [[TMP78:%.*]] = bitcast i8** [[TMP77]] to i32** +// CHECK16-NEXT: store i32* [[VLA]], i32** [[TMP78]], align 8 +// CHECK16-NEXT: store i64 [[TMP59]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 3), align 8 +// CHECK16-NEXT: [[TMP79:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 3 // CHECK16-NEXT: store i8* null, i8** [[TMP79]], align 8 -// CHECK16-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 2 -// CHECK16-NEXT: [[TMP81:%.*]] = bitcast i8** [[TMP80]] to i64* -// CHECK16-NEXT: store i64 [[TMP1]], i64* [[TMP81]], align 8 -// CHECK16-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 2 -// CHECK16-NEXT: [[TMP83:%.*]] = bitcast i8** [[TMP82]] to i64* -// CHECK16-NEXT: store i64 [[TMP1]], i64* [[TMP83]], align 8 -// CHECK16-NEXT: [[TMP84:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES24]], i32 0, i32 2 -// CHECK16-NEXT: store i64 8, i64* [[TMP84]], align 8 -// CHECK16-NEXT: [[TMP85:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 2 -// CHECK16-NEXT: store i8* null, i8** [[TMP85]], align 8 -// CHECK16-NEXT: [[TMP86:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 3 -// CHECK16-NEXT: [[TMP87:%.*]] = bitcast i8** [[TMP86]] to i32** -// CHECK16-NEXT: store i32* [[VLA]], i32** [[TMP87]], align 8 -// CHECK16-NEXT: [[TMP88:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 3 -// CHECK16-NEXT: [[TMP89:%.*]] = bitcast i8** [[TMP88]] to i32** -// CHECK16-NEXT: store i32* [[VLA]], i32** [[TMP89]], align 8 -// CHECK16-NEXT: [[TMP90:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES24]], i32 0, i32 3 
-// CHECK16-NEXT: store i64 [[TMP67]], i64* [[TMP90]], align 8 -// CHECK16-NEXT: [[TMP91:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS23]], i64 0, i64 3 -// CHECK16-NEXT: store i8* null, i8** [[TMP91]], align 8 -// CHECK16-NEXT: [[TMP92:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0 -// CHECK16-NEXT: [[TMP93:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0 -// CHECK16-NEXT: [[TMP94:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES24]], i32 0, i32 0 -// CHECK16-NEXT: [[TMP95:%.*]] = load i32, i32* [[N]], align 4 -// CHECK16-NEXT: store i32 [[TMP95]], i32* [[DOTCAPTURE_EXPR_26]], align 4 -// CHECK16-NEXT: [[TMP96:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_26]], align 4 -// CHECK16-NEXT: [[SUB28:%.*]] = sub nsw i32 [[TMP96]], 0 -// CHECK16-NEXT: [[DIV29:%.*]] = sdiv i32 [[SUB28]], 1 -// CHECK16-NEXT: [[SUB30:%.*]] = sub nsw i32 [[DIV29]], 1 -// CHECK16-NEXT: store i32 [[SUB30]], i32* [[DOTCAPTURE_EXPR_27]], align 4 -// CHECK16-NEXT: [[TMP97:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_27]], align 4 -// CHECK16-NEXT: [[ADD31:%.*]] = add nsw i32 [[TMP97]], 1 -// CHECK16-NEXT: [[TMP98:%.*]] = zext i32 [[ADD31]] to i64 -// CHECK16-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP98]]) -// CHECK16-NEXT: [[TMP99:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l164.region_id, i32 4, i8** [[TMP92]], i8** [[TMP93]], i64* [[TMP94]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.7, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK16-NEXT: [[TMP100:%.*]] = icmp ne i32 [[TMP99]], 0 -// CHECK16-NEXT: br i1 [[TMP100]], label [[OMP_OFFLOAD_FAILED32:%.*]], label [[OMP_OFFLOAD_CONT33:%.*]] -// CHECK16: omp_offload.failed32: -// CHECK16-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l164(i64 [[TMP64]], i64 [[TMP66]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] -// CHECK16-NEXT: br label [[OMP_OFFLOAD_CONT33]] -// CHECK16: omp_offload.cont33: -// CHECK16-NEXT: [[TMP101:%.*]] = load i32, i32* [[N]], align 4 -// CHECK16-NEXT: [[CONV35:%.*]] = bitcast i64* [[N_CASTED34]] to i32* -// CHECK16-NEXT: store i32 [[TMP101]], i32* [[CONV35]], align 4 -// CHECK16-NEXT: [[TMP102:%.*]] = load i64, i64* [[N_CASTED34]], align 8 -// CHECK16-NEXT: [[TMP103:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK16-NEXT: [[TMP104:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS36]], i32 0, i32 0 -// CHECK16-NEXT: [[TMP105:%.*]] = bitcast i8** [[TMP104]] to i64* -// CHECK16-NEXT: store i64 [[TMP102]], i64* [[TMP105]], align 8 -// CHECK16-NEXT: [[TMP106:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS37]], i32 0, i32 0 -// CHECK16-NEXT: [[TMP107:%.*]] = bitcast i8** [[TMP106]] to i64* -// CHECK16-NEXT: store i64 [[TMP102]], i64* [[TMP107]], align 8 -// CHECK16-NEXT: [[TMP108:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES39]], i32 0, i32 0 -// CHECK16-NEXT: store i64 4, i64* [[TMP108]], align 8 -// CHECK16-NEXT: [[TMP109:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS38]], i64 0, i64 0 -// CHECK16-NEXT: store i8* null, i8** [[TMP109]], align 8 -// CHECK16-NEXT: [[TMP110:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS36]], i32 0, i32 1 -// CHECK16-NEXT: [[TMP111:%.*]] = bitcast i8** [[TMP110]] to i64* -// CHECK16-NEXT: 
store i64 [[TMP1]], i64* [[TMP111]], align 8 -// CHECK16-NEXT: [[TMP112:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS37]], i32 0, i32 1 -// CHECK16-NEXT: [[TMP113:%.*]] = bitcast i8** [[TMP112]] to i64* -// CHECK16-NEXT: store i64 [[TMP1]], i64* [[TMP113]], align 8 -// CHECK16-NEXT: [[TMP114:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES39]], i32 0, i32 1 -// CHECK16-NEXT: store i64 8, i64* [[TMP114]], align 8 -// CHECK16-NEXT: [[TMP115:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS38]], i64 0, i64 1 -// CHECK16-NEXT: store i8* null, i8** [[TMP115]], align 8 -// CHECK16-NEXT: [[TMP116:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS36]], i32 0, i32 2 -// CHECK16-NEXT: [[TMP117:%.*]] = bitcast i8** [[TMP116]] to i32** -// CHECK16-NEXT: store i32* [[VLA]], i32** [[TMP117]], align 8 -// CHECK16-NEXT: [[TMP118:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS37]], i32 0, i32 2 -// CHECK16-NEXT: [[TMP119:%.*]] = bitcast i8** [[TMP118]] to i32** -// CHECK16-NEXT: store i32* [[VLA]], i32** [[TMP119]], align 8 -// CHECK16-NEXT: [[TMP120:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES39]], i32 0, i32 2 -// CHECK16-NEXT: store i64 [[TMP103]], i64* [[TMP120]], align 8 -// CHECK16-NEXT: [[TMP121:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS38]], i64 0, i64 2 -// CHECK16-NEXT: store i8* null, i8** [[TMP121]], align 8 -// CHECK16-NEXT: [[TMP122:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS36]], i32 0, i32 0 -// CHECK16-NEXT: [[TMP123:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS37]], i32 0, i32 0 -// CHECK16-NEXT: [[TMP124:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES39]], i32 0, i32 0 -// CHECK16-NEXT: [[TMP125:%.*]] = load i32, i32* [[N]], align 4 -// CHECK16-NEXT: store i32 [[TMP125]], i32* [[DOTCAPTURE_EXPR_41]], align 4 -// CHECK16-NEXT: [[TMP126:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_41]], align 4 -// CHECK16-NEXT: [[SUB43:%.*]] = sub nsw i32 [[TMP126]], 0 -// CHECK16-NEXT: [[DIV44:%.*]] = sdiv i32 [[SUB43]], 1 -// CHECK16-NEXT: [[SUB45:%.*]] = sub nsw i32 [[DIV44]], 1 -// CHECK16-NEXT: store i32 [[SUB45]], i32* [[DOTCAPTURE_EXPR_42]], align 4 -// CHECK16-NEXT: [[TMP127:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_42]], align 4 -// CHECK16-NEXT: [[ADD46:%.*]] = add nsw i32 [[TMP127]], 1 -// CHECK16-NEXT: [[TMP128:%.*]] = zext i32 [[ADD46]] to i64 -// CHECK16-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP128]]) -// CHECK16-NEXT: [[TMP129:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l169.region_id, i32 3, i8** [[TMP122]], i8** [[TMP123]], i64* [[TMP124]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK16-NEXT: [[TMP130:%.*]] = icmp ne i32 [[TMP129]], 0 -// CHECK16-NEXT: br i1 [[TMP130]], label [[OMP_OFFLOAD_FAILED47:%.*]], label [[OMP_OFFLOAD_CONT48:%.*]] -// CHECK16: omp_offload.failed47: -// CHECK16-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l169(i64 [[TMP102]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] -// CHECK16-NEXT: br label [[OMP_OFFLOAD_CONT48]] -// CHECK16: omp_offload.cont48: -// CHECK16-NEXT: [[TMP131:%.*]] = load i32, i32* [[M]], align 4 -// CHECK16-NEXT: [[CONV50:%.*]] = bitcast i64* 
[[M_CASTED49]] to i32* -// CHECK16-NEXT: store i32 [[TMP131]], i32* [[CONV50]], align 4 -// CHECK16-NEXT: [[TMP132:%.*]] = load i64, i64* [[M_CASTED49]], align 8 -// CHECK16-NEXT: [[TMP133:%.*]] = load i32, i32* [[N]], align 4 -// CHECK16-NEXT: [[CONV52:%.*]] = bitcast i64* [[N_CASTED51]] to i32* -// CHECK16-NEXT: store i32 [[TMP133]], i32* [[CONV52]], align 4 -// CHECK16-NEXT: [[TMP134:%.*]] = load i64, i64* [[N_CASTED51]], align 8 -// CHECK16-NEXT: [[TMP135:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK16-NEXT: [[TMP136:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS53]], i32 0, i32 0 -// CHECK16-NEXT: [[TMP137:%.*]] = bitcast i8** [[TMP136]] to i64* -// CHECK16-NEXT: store i64 [[TMP132]], i64* [[TMP137]], align 8 -// CHECK16-NEXT: [[TMP138:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS54]], i32 0, i32 0 -// CHECK16-NEXT: [[TMP139:%.*]] = bitcast i8** [[TMP138]] to i64* -// CHECK16-NEXT: store i64 [[TMP132]], i64* [[TMP139]], align 8 -// CHECK16-NEXT: [[TMP140:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES56]], i32 0, i32 0 -// CHECK16-NEXT: store i64 4, i64* [[TMP140]], align 8 -// CHECK16-NEXT: [[TMP141:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS55]], i64 0, i64 0 -// CHECK16-NEXT: store i8* null, i8** [[TMP141]], align 8 -// CHECK16-NEXT: [[TMP142:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS53]], i32 0, i32 1 -// CHECK16-NEXT: [[TMP143:%.*]] = bitcast i8** [[TMP142]] to i64* -// CHECK16-NEXT: store i64 [[TMP134]], i64* [[TMP143]], align 8 -// CHECK16-NEXT: [[TMP144:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS54]], i32 0, i32 1 -// CHECK16-NEXT: [[TMP145:%.*]] = bitcast i8** [[TMP144]] to i64* -// CHECK16-NEXT: store i64 [[TMP134]], i64* [[TMP145]], align 8 -// CHECK16-NEXT: [[TMP146:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES56]], i32 0, i32 1 -// CHECK16-NEXT: store i64 4, i64* [[TMP146]], align 8 -// CHECK16-NEXT: [[TMP147:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS55]], i64 0, i64 1 -// CHECK16-NEXT: store i8* null, i8** [[TMP147]], align 8 -// CHECK16-NEXT: [[TMP148:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS53]], i32 0, i32 2 -// CHECK16-NEXT: [[TMP149:%.*]] = bitcast i8** [[TMP148]] to i64* -// CHECK16-NEXT: store i64 [[TMP1]], i64* [[TMP149]], align 8 -// CHECK16-NEXT: [[TMP150:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS54]], i32 0, i32 2 -// CHECK16-NEXT: [[TMP151:%.*]] = bitcast i8** [[TMP150]] to i64* -// CHECK16-NEXT: store i64 [[TMP1]], i64* [[TMP151]], align 8 -// CHECK16-NEXT: [[TMP152:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES56]], i32 0, i32 2 -// CHECK16-NEXT: store i64 8, i64* [[TMP152]], align 8 -// CHECK16-NEXT: [[TMP153:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS55]], i64 0, i64 2 -// CHECK16-NEXT: store i8* null, i8** [[TMP153]], align 8 -// CHECK16-NEXT: [[TMP154:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS53]], i32 0, i32 3 -// CHECK16-NEXT: [[TMP155:%.*]] = bitcast i8** [[TMP154]] to i32** -// CHECK16-NEXT: store i32* [[VLA]], i32** [[TMP155]], align 8 -// CHECK16-NEXT: [[TMP156:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS54]], i32 0, i32 3 -// CHECK16-NEXT: [[TMP157:%.*]] = bitcast i8** [[TMP156]] to i32** -// CHECK16-NEXT: store i32* [[VLA]], i32** [[TMP157]], align 8 -// 
CHECK16-NEXT: [[TMP158:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES56]], i32 0, i32 3 -// CHECK16-NEXT: store i64 [[TMP135]], i64* [[TMP158]], align 8 -// CHECK16-NEXT: [[TMP159:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS55]], i64 0, i64 3 -// CHECK16-NEXT: store i8* null, i8** [[TMP159]], align 8 -// CHECK16-NEXT: [[TMP160:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS53]], i32 0, i32 0 -// CHECK16-NEXT: [[TMP161:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS54]], i32 0, i32 0 -// CHECK16-NEXT: [[TMP162:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES56]], i32 0, i32 0 -// CHECK16-NEXT: [[TMP163:%.*]] = load i32, i32* [[N]], align 4 -// CHECK16-NEXT: store i32 [[TMP163]], i32* [[DOTCAPTURE_EXPR_58]], align 4 -// CHECK16-NEXT: [[TMP164:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_58]], align 4 -// CHECK16-NEXT: [[SUB60:%.*]] = sub nsw i32 [[TMP164]], 0 -// CHECK16-NEXT: [[DIV61:%.*]] = sdiv i32 [[SUB60]], 1 -// CHECK16-NEXT: [[SUB62:%.*]] = sub nsw i32 [[DIV61]], 1 -// CHECK16-NEXT: store i32 [[SUB62]], i32* [[DOTCAPTURE_EXPR_59]], align 4 -// CHECK16-NEXT: [[TMP165:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_59]], align 4 -// CHECK16-NEXT: [[ADD63:%.*]] = add nsw i32 [[TMP165]], 1 -// CHECK16-NEXT: [[TMP166:%.*]] = zext i32 [[ADD63]] to i64 -// CHECK16-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP166]]) -// CHECK16-NEXT: [[TMP167:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l174.region_id, i32 4, i8** [[TMP160]], i8** [[TMP161]], i64* [[TMP162]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK16-NEXT: [[TMP168:%.*]] = icmp ne i32 [[TMP167]], 0 -// CHECK16-NEXT: br i1 [[TMP168]], label [[OMP_OFFLOAD_FAILED64:%.*]], label [[OMP_OFFLOAD_CONT65:%.*]] -// CHECK16: omp_offload.failed64: -// CHECK16-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l174(i64 [[TMP132]], i64 [[TMP134]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] -// CHECK16-NEXT: br label [[OMP_OFFLOAD_CONT65]] -// CHECK16: omp_offload.cont65: -// CHECK16-NEXT: [[TMP169:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 -// CHECK16-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP169]]) +// CHECK16-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0 +// CHECK16-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0 +// CHECK16-NEXT: [[TMP82:%.*]] = load i32, i32* [[N]], align 4 +// CHECK16-NEXT: store i32 [[TMP82]], i32* [[DOTCAPTURE_EXPR_24]], align 4 +// CHECK16-NEXT: [[TMP83:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_24]], align 4 +// CHECK16-NEXT: [[SUB26:%.*]] = sub nsw i32 [[TMP83]], 0 +// CHECK16-NEXT: [[DIV27:%.*]] = sdiv i32 [[SUB26]], 1 +// CHECK16-NEXT: [[SUB28:%.*]] = sub nsw i32 [[DIV27]], 1 +// CHECK16-NEXT: store i32 [[SUB28]], i32* [[DOTCAPTURE_EXPR_25]], align 4 +// CHECK16-NEXT: [[TMP84:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_25]], align 4 +// CHECK16-NEXT: [[ADD29:%.*]] = add nsw i32 [[TMP84]], 1 +// CHECK16-NEXT: [[TMP85:%.*]] = zext i32 [[ADD29]] to i64 +// CHECK16-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP85]]) +// CHECK16-NEXT: [[TMP86:%.*]] = call i32 
@__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l164.region_id, i32 4, i8** [[TMP80]], i8** [[TMP81]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK16-NEXT: [[TMP87:%.*]] = icmp ne i32 [[TMP86]], 0 +// CHECK16-NEXT: br i1 [[TMP87]], label [[OMP_OFFLOAD_FAILED30:%.*]], label [[OMP_OFFLOAD_CONT31:%.*]] +// CHECK16: omp_offload.failed30: +// CHECK16-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l164(i64 [[TMP56]], i64 [[TMP58]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] +// CHECK16-NEXT: br label [[OMP_OFFLOAD_CONT31]] +// CHECK16: omp_offload.cont31: +// CHECK16-NEXT: [[TMP88:%.*]] = load i32, i32* [[N]], align 4 +// CHECK16-NEXT: [[CONV33:%.*]] = bitcast i64* [[N_CASTED32]] to i32* +// CHECK16-NEXT: store i32 [[TMP88]], i32* [[CONV33]], align 4 +// CHECK16-NEXT: [[TMP89:%.*]] = load i64, i64* [[N_CASTED32]], align 8 +// CHECK16-NEXT: [[TMP90:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK16-NEXT: [[TMP91:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 0 +// CHECK16-NEXT: [[TMP92:%.*]] = bitcast i8** [[TMP91]] to i64* +// CHECK16-NEXT: store i64 [[TMP89]], i64* [[TMP92]], align 8 +// CHECK16-NEXT: [[TMP93:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS35]], i32 0, i32 0 +// CHECK16-NEXT: [[TMP94:%.*]] = bitcast i8** [[TMP93]] to i64* +// CHECK16-NEXT: store i64 [[TMP89]], i64* [[TMP94]], align 8 +// CHECK16-NEXT: [[TMP95:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS36]], i64 0, i64 0 +// CHECK16-NEXT: store i8* null, i8** [[TMP95]], align 8 +// CHECK16-NEXT: [[TMP96:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 1 +// CHECK16-NEXT: [[TMP97:%.*]] = bitcast i8** [[TMP96]] to i64* +// CHECK16-NEXT: store i64 [[TMP1]], i64* [[TMP97]], align 8 +// CHECK16-NEXT: [[TMP98:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS35]], i32 0, i32 1 +// CHECK16-NEXT: [[TMP99:%.*]] = bitcast i8** [[TMP98]] to i64* +// CHECK16-NEXT: store i64 [[TMP1]], i64* [[TMP99]], align 8 +// CHECK16-NEXT: [[TMP100:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS36]], i64 0, i64 1 +// CHECK16-NEXT: store i8* null, i8** [[TMP100]], align 8 +// CHECK16-NEXT: [[TMP101:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 2 +// CHECK16-NEXT: [[TMP102:%.*]] = bitcast i8** [[TMP101]] to i32** +// CHECK16-NEXT: store i32* [[VLA]], i32** [[TMP102]], align 8 +// CHECK16-NEXT: [[TMP103:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS35]], i32 0, i32 2 +// CHECK16-NEXT: [[TMP104:%.*]] = bitcast i8** [[TMP103]] to i32** +// CHECK16-NEXT: store i32* [[VLA]], i32** [[TMP104]], align 8 +// CHECK16-NEXT: store i64 [[TMP90]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.12, i32 0, i32 2), align 8 +// CHECK16-NEXT: [[TMP105:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS36]], i64 0, i64 2 +// CHECK16-NEXT: store i8* null, i8** [[TMP105]], align 8 +// CHECK16-NEXT: [[TMP106:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS34]], i32 0, i32 0 +// CHECK16-NEXT: [[TMP107:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS35]], i32 0, i32 0 +// CHECK16-NEXT: [[TMP108:%.*]] = 
load i32, i32* [[N]], align 4 +// CHECK16-NEXT: store i32 [[TMP108]], i32* [[DOTCAPTURE_EXPR_38]], align 4 +// CHECK16-NEXT: [[TMP109:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_38]], align 4 +// CHECK16-NEXT: [[SUB40:%.*]] = sub nsw i32 [[TMP109]], 0 +// CHECK16-NEXT: [[DIV41:%.*]] = sdiv i32 [[SUB40]], 1 +// CHECK16-NEXT: [[SUB42:%.*]] = sub nsw i32 [[DIV41]], 1 +// CHECK16-NEXT: store i32 [[SUB42]], i32* [[DOTCAPTURE_EXPR_39]], align 4 +// CHECK16-NEXT: [[TMP110:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_39]], align 4 +// CHECK16-NEXT: [[ADD43:%.*]] = add nsw i32 [[TMP110]], 1 +// CHECK16-NEXT: [[TMP111:%.*]] = zext i32 [[ADD43]] to i64 +// CHECK16-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP111]]) +// CHECK16-NEXT: [[TMP112:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l169.region_id, i32 3, i8** [[TMP106]], i8** [[TMP107]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK16-NEXT: [[TMP113:%.*]] = icmp ne i32 [[TMP112]], 0 +// CHECK16-NEXT: br i1 [[TMP113]], label [[OMP_OFFLOAD_FAILED44:%.*]], label [[OMP_OFFLOAD_CONT45:%.*]] +// CHECK16: omp_offload.failed44: +// CHECK16-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l169(i64 [[TMP89]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] +// CHECK16-NEXT: br label [[OMP_OFFLOAD_CONT45]] +// CHECK16: omp_offload.cont45: +// CHECK16-NEXT: [[TMP114:%.*]] = load i32, i32* [[M]], align 4 +// CHECK16-NEXT: [[CONV47:%.*]] = bitcast i64* [[M_CASTED46]] to i32* +// CHECK16-NEXT: store i32 [[TMP114]], i32* [[CONV47]], align 4 +// CHECK16-NEXT: [[TMP115:%.*]] = load i64, i64* [[M_CASTED46]], align 8 +// CHECK16-NEXT: [[TMP116:%.*]] = load i32, i32* [[N]], align 4 +// CHECK16-NEXT: [[CONV49:%.*]] = bitcast i64* [[N_CASTED48]] to i32* +// CHECK16-NEXT: store i32 [[TMP116]], i32* [[CONV49]], align 4 +// CHECK16-NEXT: [[TMP117:%.*]] = load i64, i64* [[N_CASTED48]], align 8 +// CHECK16-NEXT: [[TMP118:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK16-NEXT: [[TMP119:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 0 +// CHECK16-NEXT: [[TMP120:%.*]] = bitcast i8** [[TMP119]] to i64* +// CHECK16-NEXT: store i64 [[TMP115]], i64* [[TMP120]], align 8 +// CHECK16-NEXT: [[TMP121:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 0 +// CHECK16-NEXT: [[TMP122:%.*]] = bitcast i8** [[TMP121]] to i64* +// CHECK16-NEXT: store i64 [[TMP115]], i64* [[TMP122]], align 8 +// CHECK16-NEXT: [[TMP123:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 0 +// CHECK16-NEXT: store i8* null, i8** [[TMP123]], align 8 +// CHECK16-NEXT: [[TMP124:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 1 +// CHECK16-NEXT: [[TMP125:%.*]] = bitcast i8** [[TMP124]] to i64* +// CHECK16-NEXT: store i64 [[TMP117]], i64* [[TMP125]], align 8 +// CHECK16-NEXT: [[TMP126:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 1 +// CHECK16-NEXT: [[TMP127:%.*]] = bitcast i8** [[TMP126]] to i64* +// CHECK16-NEXT: store i64 [[TMP117]], i64* [[TMP127]], align 8 +// CHECK16-NEXT: [[TMP128:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 1 +// CHECK16-NEXT: store i8* null, i8** 
[[TMP128]], align 8 +// CHECK16-NEXT: [[TMP129:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 2 +// CHECK16-NEXT: [[TMP130:%.*]] = bitcast i8** [[TMP129]] to i64* +// CHECK16-NEXT: store i64 [[TMP1]], i64* [[TMP130]], align 8 +// CHECK16-NEXT: [[TMP131:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 2 +// CHECK16-NEXT: [[TMP132:%.*]] = bitcast i8** [[TMP131]] to i64* +// CHECK16-NEXT: store i64 [[TMP1]], i64* [[TMP132]], align 8 +// CHECK16-NEXT: [[TMP133:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 2 +// CHECK16-NEXT: store i8* null, i8** [[TMP133]], align 8 +// CHECK16-NEXT: [[TMP134:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 3 +// CHECK16-NEXT: [[TMP135:%.*]] = bitcast i8** [[TMP134]] to i32** +// CHECK16-NEXT: store i32* [[VLA]], i32** [[TMP135]], align 8 +// CHECK16-NEXT: [[TMP136:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 3 +// CHECK16-NEXT: [[TMP137:%.*]] = bitcast i8** [[TMP136]] to i32** +// CHECK16-NEXT: store i32* [[VLA]], i32** [[TMP137]], align 8 +// CHECK16-NEXT: store i64 [[TMP118]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.16, i32 0, i32 3), align 8 +// CHECK16-NEXT: [[TMP138:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS52]], i64 0, i64 3 +// CHECK16-NEXT: store i8* null, i8** [[TMP138]], align 8 +// CHECK16-NEXT: [[TMP139:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS50]], i32 0, i32 0 +// CHECK16-NEXT: [[TMP140:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS51]], i32 0, i32 0 +// CHECK16-NEXT: [[TMP141:%.*]] = load i32, i32* [[N]], align 4 +// CHECK16-NEXT: store i32 [[TMP141]], i32* [[DOTCAPTURE_EXPR_54]], align 4 +// CHECK16-NEXT: [[TMP142:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_54]], align 4 +// CHECK16-NEXT: [[SUB56:%.*]] = sub nsw i32 [[TMP142]], 0 +// CHECK16-NEXT: [[DIV57:%.*]] = sdiv i32 [[SUB56]], 1 +// CHECK16-NEXT: [[SUB58:%.*]] = sub nsw i32 [[DIV57]], 1 +// CHECK16-NEXT: store i32 [[SUB58]], i32* [[DOTCAPTURE_EXPR_55]], align 4 +// CHECK16-NEXT: [[TMP143:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_55]], align 4 +// CHECK16-NEXT: [[ADD59:%.*]] = add nsw i32 [[TMP143]], 1 +// CHECK16-NEXT: [[TMP144:%.*]] = zext i32 [[ADD59]] to i64 +// CHECK16-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP144]]) +// CHECK16-NEXT: [[TMP145:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l174.region_id, i32 4, i8** [[TMP139]], i8** [[TMP140]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.16, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK16-NEXT: [[TMP146:%.*]] = icmp ne i32 [[TMP145]], 0 +// CHECK16-NEXT: br i1 [[TMP146]], label [[OMP_OFFLOAD_FAILED60:%.*]], label [[OMP_OFFLOAD_CONT61:%.*]] +// CHECK16: omp_offload.failed60: +// CHECK16-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l174(i64 [[TMP115]], i64 [[TMP117]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] +// CHECK16-NEXT: br label [[OMP_OFFLOAD_CONT61]] +// CHECK16: omp_offload.cont61: +// CHECK16-NEXT: [[TMP147:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 +// CHECK16-NEXT: [[CALL:%.*]] = call noundef signext i32 
@_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP147]]) // CHECK16-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK16-NEXT: [[TMP170:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK16-NEXT: call void @llvm.stackrestore(i8* [[TMP170]]) -// CHECK16-NEXT: [[TMP171:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK16-NEXT: ret i32 [[TMP171]] +// CHECK16-NEXT: [[TMP148:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK16-NEXT: call void @llvm.stackrestore(i8* [[TMP148]]) +// CHECK16-NEXT: [[TMP149:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK16-NEXT: ret i32 [[TMP149]] // // // CHECK16-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l154 @@ -17604,11 +17448,11 @@ // CHECK16-NEXT: [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK16-NEXT: store i32 [[TMP3]], i32* [[CONV2]], align 4 // CHECK16-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i64, i32*, i64)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i32* [[CONV1]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP4]]) +// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i64, i32*, i64)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32* [[CONV1]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP4]]) // CHECK16-NEXT: ret void // // -// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..5 +// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..6 // CHECK16-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK16-NEXT: entry: // CHECK16-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -17690,7 +17534,7 @@ // CHECK16-NEXT: [[CONV7:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK16-NEXT: store i32 [[TMP21]], i32* [[CONV7]], align 4, !llvm.access.group !28 // CHECK16-NEXT: [[TMP22:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !28 -// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64, i32*, i64)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], i64 [[TMP1]], i32* [[TMP2]], i64 [[TMP22]]), !llvm.access.group !28 +// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64, i32*, i64)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i64 [[TMP18]], i64 [[TMP20]], i32* [[TMP0]], i64 [[TMP1]], i32* [[TMP2]], i64 [[TMP22]]), !llvm.access.group !28 // CHECK16-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK16: omp.inner.for.inc: // CHECK16-NEXT: [[TMP23:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !28 @@ -17744,7 +17588,7 @@ // CHECK16-NEXT: ret void // // -// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..6 +// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..7 // CHECK16-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK16-NEXT: entry: // CHECK16-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -17876,11 +17720,11 @@ // CHECK16-NEXT: [[CONV:%.*]] = bitcast i64* [[N_ADDR]] to i32* // CHECK16-NEXT: [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8 // CHECK16-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 8 -// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i64, i32*)* @.omp_outlined..8 to void (i32*, i32*, ...)*), i32* [[CONV]], i64 [[TMP0]], i32* [[TMP1]]) +// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i64, i32*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), i32* [[CONV]], i64 [[TMP0]], i32* [[TMP1]]) // CHECK16-NEXT: ret void // // -// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..8 +// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..10 // CHECK16-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] { // CHECK16-NEXT: entry: // CHECK16-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -17952,7 +17796,7 @@ // CHECK16-NEXT: [[TMP17:%.*]] = zext i32 [[TMP16]] to i64 // CHECK16-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !34 // CHECK16-NEXT: [[TMP19:%.*]] = zext i32 [[TMP18]] to i64 -// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64, i32*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), i64 [[TMP17]], i64 [[TMP19]], i32* [[TMP0]], i64 [[TMP1]], i32* [[TMP2]]), !llvm.access.group !34 +// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64, i32*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i64 [[TMP17]], i64 [[TMP19]], i32* [[TMP0]], i64 [[TMP1]], i32* [[TMP2]]), !llvm.access.group !34 // CHECK16-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK16: omp.inner.for.inc: // CHECK16-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !34 @@ -17983,7 +17827,7 @@ // CHECK16-NEXT: ret void // // -// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..9 +// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..11 // CHECK16-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] { // CHECK16-NEXT: entry: // CHECK16-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -18119,11 +17963,11 @@ // CHECK16-NEXT: [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK16-NEXT: store i32 [[TMP3]], i32* [[CONV2]], align 4 // CHECK16-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i64, i32*, i64)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32* [[CONV1]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP4]]) +// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i64, i32*, i64)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i32* [[CONV1]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP4]]) // CHECK16-NEXT: ret void // // -// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..14 // CHECK16-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK16-NEXT: entry: // CHECK16-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -18203,7 +18047,7 @@ // CHECK16-NEXT: [[CONV7:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK16-NEXT: store i32 [[TMP20]], i32* [[CONV7]], align 4, !llvm.access.group !40 // CHECK16-NEXT: [[TMP21:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !40 -// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64, i32*, i64)* @.omp_outlined..12 to void (i32*, i32*, ...)*), i64 [[TMP17]], i64 [[TMP19]], i32* [[TMP0]], i64 [[TMP1]], i32* [[TMP2]], i64 [[TMP21]]), !llvm.access.group !40 +// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, i32*, i64, i32*, i64)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i64 [[TMP17]], i64 [[TMP19]], i32* [[TMP0]], i64 [[TMP1]], i32* [[TMP2]], i64 [[TMP21]]), !llvm.access.group !40 // CHECK16-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK16: omp.inner.for.inc: // CHECK16-NEXT: [[TMP22:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !40 @@ -18234,7 +18078,7 @@ // CHECK16-NEXT: ret void // // -// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..12 +// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..15 // CHECK16-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK16-NEXT: entry: // CHECK16-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -18392,7 +18236,7 @@ // CHECK16-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK16-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK16-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK16-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l122.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK16-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l122.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.20, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.21, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK16-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK16-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK16: omp_offload.failed: @@ -18410,7 +18254,7 @@ // CHECK16-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0 // CHECK16-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0 // CHECK16-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK16-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l127.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.19, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.20, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK16-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l127.region_id, i32 1, i8** [[TMP14]], i8** 
[[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.24, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.25, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK16-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 // CHECK16-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]] // CHECK16: omp_offload.failed5: @@ -18440,7 +18284,7 @@ // CHECK16-NEXT: [[TMP30:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0 // CHECK16-NEXT: [[TMP31:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0 // CHECK16-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK16-NEXT: [[TMP32:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l132.region_id, i32 2, i8** [[TMP30]], i8** [[TMP31]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.23, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.24, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK16-NEXT: [[TMP32:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l132.region_id, i32 2, i8** [[TMP30]], i8** [[TMP31]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.28, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.29, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK16-NEXT: [[TMP33:%.*]] = icmp ne i32 [[TMP32]], 0 // CHECK16-NEXT: br i1 [[TMP33]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]] // CHECK16: omp_offload.failed11: @@ -18458,7 +18302,7 @@ // CHECK16-NEXT: [[TMP39:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 0 // CHECK16-NEXT: [[TMP40:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 0 // CHECK16-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK16-NEXT: [[TMP41:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l137.region_id, i32 1, i8** [[TMP39]], i8** [[TMP40]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.27, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.28, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK16-NEXT: [[TMP41:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l137.region_id, i32 1, i8** [[TMP39]], i8** [[TMP40]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.32, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.33, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK16-NEXT: [[TMP42:%.*]] = icmp ne i32 [[TMP41]], 0 // CHECK16-NEXT: br i1 [[TMP42]], label [[OMP_OFFLOAD_FAILED17:%.*]], label [[OMP_OFFLOAD_CONT18:%.*]] // CHECK16: omp_offload.failed17: @@ -18488,7 +18332,7 @@ // CHECK16-NEXT: [[TMP55:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS21]], i32 0, i32 0 // CHECK16-NEXT: [[TMP56:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS22]], i32 0, i32 0 // 
CHECK16-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK16-NEXT: [[TMP57:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l142.region_id, i32 2, i8** [[TMP55]], i8** [[TMP56]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.31, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.32, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK16-NEXT: [[TMP57:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l142.region_id, i32 2, i8** [[TMP55]], i8** [[TMP56]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.36, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.37, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK16-NEXT: [[TMP58:%.*]] = icmp ne i32 [[TMP57]], 0 // CHECK16-NEXT: br i1 [[TMP58]], label [[OMP_OFFLOAD_FAILED25:%.*]], label [[OMP_OFFLOAD_CONT26:%.*]] // CHECK16: omp_offload.failed25: @@ -18504,11 +18348,11 @@ // CHECK16-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK16-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 // CHECK16-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 -// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK16-NEXT: ret void // // -// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..14 +// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..18 // CHECK16-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK16-NEXT: entry: // CHECK16-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -18556,7 +18400,7 @@ // CHECK16-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 // CHECK16-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !46 // CHECK16-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]), !llvm.access.group !46 +// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..19 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]), !llvm.access.group !46 // CHECK16-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK16: omp.inner.for.inc: // CHECK16-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !46 @@ -18578,7 +18422,7 @@ // CHECK16-NEXT: ret void // // -// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..15 +// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..19 // CHECK16-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK16-NEXT: entry: // CHECK16-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -18668,11 +18512,11 @@ // CHECK16-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK16-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 // CHECK16-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 -// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..17 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..22 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK16-NEXT: ret void // // -// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..17 +// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..22 // CHECK16-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK16-NEXT: entry: // CHECK16-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -18720,7 +18564,7 @@ // CHECK16-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 // CHECK16-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !52 // CHECK16-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]), !llvm.access.group !52 +// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..23 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]), !llvm.access.group !52 // CHECK16-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK16: omp.inner.for.inc: // CHECK16-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !52 @@ -18742,7 +18586,7 @@ // CHECK16-NEXT: ret void // // -// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..18 +// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..23 // CHECK16-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK16-NEXT: entry: // CHECK16-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -18843,11 +18687,11 @@ // CHECK16-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK16-NEXT: store i32 [[TMP2]], i32* [[CONV1]], align 4 // CHECK16-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..21 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP3]]) +// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..26 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP3]]) // CHECK16-NEXT: ret void // // -// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..21 +// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..26 // CHECK16-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK16-NEXT: entry: // CHECK16-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -18903,7 +18747,7 @@ // CHECK16-NEXT: [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK16-NEXT: store i32 [[TMP12]], i32* [[CONV2]], align 4, !llvm.access.group !58 // CHECK16-NEXT: [[TMP13:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !58 -// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..22 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]]), !llvm.access.group !58 +// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..27 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]]), !llvm.access.group !58 // CHECK16-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK16: omp.inner.for.inc: // CHECK16-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !58 @@ -18925,7 +18769,7 @@ // CHECK16-NEXT: ret void // // -// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..22 +// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..27 // CHECK16-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK16-NEXT: entry: // CHECK16-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -19040,11 +18884,11 @@ // CHECK16-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK16-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 // CHECK16-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 -// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..25 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..30 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK16-NEXT: ret void // // -// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..25 +// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..30 // CHECK16-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK16-NEXT: entry: // CHECK16-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -19092,7 +18936,7 @@ // CHECK16-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64 // CHECK16-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !64 // CHECK16-NEXT: [[TMP11:%.*]] = zext i32 [[TMP10]] to i64 -// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..26 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]), !llvm.access.group !64 +// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*)* @.omp_outlined..31 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]]), !llvm.access.group !64 // CHECK16-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK16: omp.inner.for.inc: // CHECK16-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !64 @@ -19114,7 +18958,7 @@ // CHECK16-NEXT: ret void // // -// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..26 +// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..31 // CHECK16-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK16-NEXT: entry: // CHECK16-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -19213,11 +19057,11 @@ // CHECK16-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK16-NEXT: store i32 [[TMP2]], i32* [[CONV1]], align 4 // CHECK16-NEXT: [[TMP3:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..29 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP3]]) +// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i64)* @.omp_outlined..34 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i64 [[TMP3]]) // CHECK16-NEXT: ret void // // -// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..29 +// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..34 // CHECK16-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK16-NEXT: entry: // CHECK16-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -19273,7 +19117,7 @@ // CHECK16-NEXT: [[CONV2:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK16-NEXT: store i32 [[TMP12]], i32* [[CONV2]], align 4, !llvm.access.group !70 // CHECK16-NEXT: [[TMP13:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8, !llvm.access.group !70 -// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..30 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]]), !llvm.access.group !70 +// CHECK16-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, i64, [10 x i32]*, i64)* @.omp_outlined..35 to void (i32*, i32*, ...)*), i64 [[TMP9]], i64 [[TMP11]], [10 x i32]* [[TMP0]], i64 [[TMP13]]), !llvm.access.group !70 // CHECK16-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK16: omp.inner.for.inc: // CHECK16-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !70 @@ -19295,7 +19139,7 @@ // CHECK16-NEXT: ret void // // -// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..30 +// CHECK16-LABEL: define {{[^@]+}}@.omp_outlined..35 // CHECK16-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[DOTPREVIOUS_LB_:%.*]], i64 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK16-NEXT: entry: // CHECK16-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -19402,7 +19246,6 @@ // CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK17-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK17-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4 -// CHECK17-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 4 // CHECK17-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK17-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK17-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -19410,36 +19253,32 @@ // CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS4:%.*]] = alloca [3 x i8*], align 4 // CHECK17-NEXT: [[DOTOFFLOAD_PTRS5:%.*]] = alloca [3 x i8*], align 4 // CHECK17-NEXT: [[DOTOFFLOAD_MAPPERS6:%.*]] = alloca [3 x i8*], align 4 -// CHECK17-NEXT: [[DOTOFFLOAD_SIZES7:%.*]] = alloca [3 x i64], align 4 -// CHECK17-NEXT: [[_TMP8:%.*]] = alloca i32, align 4 +// CHECK17-NEXT: [[_TMP7:%.*]] = alloca i32, align 4 +// CHECK17-NEXT: [[DOTCAPTURE_EXPR_8:%.*]] = alloca i32, align 4 // CHECK17-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4 -// CHECK17-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4 // CHECK17-NEXT: [[M_CASTED:%.*]] = alloca i32, align 4 -// CHECK17-NEXT: [[N_CASTED17:%.*]] = alloca i32, align 4 -// CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS18:%.*]] = alloca [4 x i8*], align 4 -// CHECK17-NEXT: [[DOTOFFLOAD_PTRS19:%.*]] = alloca [4 x i8*], align 4 -// CHECK17-NEXT: [[DOTOFFLOAD_MAPPERS20:%.*]] = alloca [4 x i8*], align 4 -// CHECK17-NEXT: [[DOTOFFLOAD_SIZES21:%.*]] = alloca [4 x i64], align 4 -// CHECK17-NEXT: [[_TMP22:%.*]] = alloca i32, align 4 -// CHECK17-NEXT: [[DOTCAPTURE_EXPR_23:%.*]] = alloca i32, align 4 -// CHECK17-NEXT: [[DOTCAPTURE_EXPR_24:%.*]] = alloca i32, align 4 -// CHECK17-NEXT: [[N_CASTED31:%.*]] = alloca i32, align 4 -// CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS32:%.*]] = alloca [3 x i8*], align 4 -// CHECK17-NEXT: [[DOTOFFLOAD_PTRS33:%.*]] = alloca [3 x i8*], align 4 -// CHECK17-NEXT: [[DOTOFFLOAD_MAPPERS34:%.*]] = alloca [3 x i8*], align 4 -// CHECK17-NEXT: [[DOTOFFLOAD_SIZES35:%.*]] = alloca [3 x i64], align 4 -// CHECK17-NEXT: [[_TMP36:%.*]] = alloca i32, align 4 -// CHECK17-NEXT: [[DOTCAPTURE_EXPR_37:%.*]] = alloca i32, align 4 -// CHECK17-NEXT: [[DOTCAPTURE_EXPR_38:%.*]] = alloca i32, align 4 -// CHECK17-NEXT: [[M_CASTED45:%.*]] = alloca i32, align 4 -// CHECK17-NEXT: [[N_CASTED46:%.*]] = alloca i32, align 4 -// CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS47:%.*]] = alloca [4 x i8*], align 4 -// CHECK17-NEXT: [[DOTOFFLOAD_PTRS48:%.*]] = alloca [4 x i8*], align 4 -// CHECK17-NEXT: 
[[DOTOFFLOAD_MAPPERS49:%.*]] = alloca [4 x i8*], align 4 -// CHECK17-NEXT: [[DOTOFFLOAD_SIZES50:%.*]] = alloca [4 x i64], align 4 -// CHECK17-NEXT: [[_TMP51:%.*]] = alloca i32, align 4 -// CHECK17-NEXT: [[DOTCAPTURE_EXPR_52:%.*]] = alloca i32, align 4 -// CHECK17-NEXT: [[DOTCAPTURE_EXPR_53:%.*]] = alloca i32, align 4 +// CHECK17-NEXT: [[N_CASTED16:%.*]] = alloca i32, align 4 +// CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS17:%.*]] = alloca [4 x i8*], align 4 +// CHECK17-NEXT: [[DOTOFFLOAD_PTRS18:%.*]] = alloca [4 x i8*], align 4 +// CHECK17-NEXT: [[DOTOFFLOAD_MAPPERS19:%.*]] = alloca [4 x i8*], align 4 +// CHECK17-NEXT: [[_TMP20:%.*]] = alloca i32, align 4 +// CHECK17-NEXT: [[DOTCAPTURE_EXPR_21:%.*]] = alloca i32, align 4 +// CHECK17-NEXT: [[DOTCAPTURE_EXPR_22:%.*]] = alloca i32, align 4 +// CHECK17-NEXT: [[N_CASTED29:%.*]] = alloca i32, align 4 +// CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS30:%.*]] = alloca [3 x i8*], align 4 +// CHECK17-NEXT: [[DOTOFFLOAD_PTRS31:%.*]] = alloca [3 x i8*], align 4 +// CHECK17-NEXT: [[DOTOFFLOAD_MAPPERS32:%.*]] = alloca [3 x i8*], align 4 +// CHECK17-NEXT: [[_TMP33:%.*]] = alloca i32, align 4 +// CHECK17-NEXT: [[DOTCAPTURE_EXPR_34:%.*]] = alloca i32, align 4 +// CHECK17-NEXT: [[DOTCAPTURE_EXPR_35:%.*]] = alloca i32, align 4 +// CHECK17-NEXT: [[M_CASTED42:%.*]] = alloca i32, align 4 +// CHECK17-NEXT: [[N_CASTED43:%.*]] = alloca i32, align 4 +// CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS44:%.*]] = alloca [4 x i8*], align 4 +// CHECK17-NEXT: [[DOTOFFLOAD_PTRS45:%.*]] = alloca [4 x i8*], align 4 +// CHECK17-NEXT: [[DOTOFFLOAD_MAPPERS46:%.*]] = alloca [4 x i8*], align 4 +// CHECK17-NEXT: [[_TMP47:%.*]] = alloca i32, align 4 +// CHECK17-NEXT: [[DOTCAPTURE_EXPR_48:%.*]] = alloca i32, align 4 +// CHECK17-NEXT: [[DOTCAPTURE_EXPR_49:%.*]] = alloca i32, align 4 // CHECK17-NEXT: store i32 0, i32* [[RETVAL]], align 4 // CHECK17-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 // CHECK17-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 4 @@ -19461,308 +19300,274 @@ // CHECK17-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK17-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i32* // CHECK17-NEXT: store i32 [[TMP3]], i32* [[TMP9]], align 4 -// CHECK17-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK17-NEXT: store i64 4, i64* [[TMP10]], align 4 -// CHECK17-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK17-NEXT: store i8* null, i8** [[TMP11]], align 4 -// CHECK17-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK17-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32* -// CHECK17-NEXT: store i32 [[TMP0]], i32* [[TMP13]], align 4 -// CHECK17-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK17-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32* -// CHECK17-NEXT: store i32 [[TMP0]], i32* [[TMP15]], align 4 -// CHECK17-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK17-NEXT: store i64 4, i64* [[TMP16]], align 4 -// CHECK17-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK17-NEXT: store i8* null, i8** [[TMP17]], align 4 -// CHECK17-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK17-NEXT: 
[[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK17-NEXT: store i8* null, i8** [[TMP10]], align 4 +// CHECK17-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK17-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i32* +// CHECK17-NEXT: store i32 [[TMP0]], i32* [[TMP12]], align 4 +// CHECK17-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK17-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i32* +// CHECK17-NEXT: store i32 [[TMP0]], i32* [[TMP14]], align 4 +// CHECK17-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK17-NEXT: store i8* null, i8** [[TMP15]], align 4 +// CHECK17-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK17-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** +// CHECK17-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 4 +// CHECK17-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK17-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** // CHECK17-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 4 -// CHECK17-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK17-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** -// CHECK17-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 4 -// CHECK17-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK17-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 4 -// CHECK17-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK17-NEXT: store i8* null, i8** [[TMP23]], align 4 -// CHECK17-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4 -// CHECK17-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK17-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK17-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 +// CHECK17-NEXT: store i64 [[TMP5]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 2), align 4 +// CHECK17-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK17-NEXT: store i8* null, i8** [[TMP20]], align 4 +// CHECK17-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP23:%.*]] = load i32, i32* [[N]], align 4 +// CHECK17-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK17-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK17-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK17-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK17-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK17-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK17-NEXT: [[TMP29:%.*]] = load i32, i32* 
[[DOTCAPTURE_EXPR_1]], align 4 -// CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1 -// CHECK17-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 -// CHECK17-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP30]]) -// CHECK17-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l154.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK17-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 -// CHECK17-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK17-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK17-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], 1 +// CHECK17-NEXT: [[TMP26:%.*]] = zext i32 [[ADD]] to i64 +// CHECK17-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP26]]) +// CHECK17-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l154.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK17-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK17-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK17: omp_offload.failed: // CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l154(i32 [[TMP3]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK17: omp_offload.cont: -// CHECK17-NEXT: [[TMP33:%.*]] = load i32, i32* [[N]], align 4 -// CHECK17-NEXT: store i32 [[TMP33]], i32* [[N_CASTED3]], align 4 -// CHECK17-NEXT: [[TMP34:%.*]] = load i32, i32* [[N_CASTED3]], align 4 -// CHECK17-NEXT: [[TMP35:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK17-NEXT: [[TMP36:%.*]] = sext i32 [[TMP35]] to i64 -// CHECK17-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i32* -// CHECK17-NEXT: store i32 [[TMP34]], i32* [[TMP38]], align 4 -// CHECK17-NEXT: [[TMP39:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i32* -// CHECK17-NEXT: store i32 [[TMP34]], i32* [[TMP40]], align 4 -// CHECK17-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 0 -// CHECK17-NEXT: store i64 4, i64* [[TMP41]], align 4 -// CHECK17-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP29:%.*]] = load i32, i32* [[N]], align 4 +// CHECK17-NEXT: store i32 [[TMP29]], i32* [[N_CASTED3]], align 4 +// CHECK17-NEXT: [[TMP30:%.*]] = load i32, i32* [[N_CASTED3]], align 4 +// CHECK17-NEXT: [[TMP31:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK17-NEXT: [[TMP32:%.*]] = sext i32 [[TMP31]] to i64 +// CHECK17-NEXT: [[TMP33:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i32* +// CHECK17-NEXT: store i32 
[[TMP30]], i32* [[TMP34]], align 4 +// CHECK17-NEXT: [[TMP35:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP36:%.*]] = bitcast i8** [[TMP35]] to i32* +// CHECK17-NEXT: store i32 [[TMP30]], i32* [[TMP36]], align 4 +// CHECK17-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0 +// CHECK17-NEXT: store i8* null, i8** [[TMP37]], align 4 +// CHECK17-NEXT: [[TMP38:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1 +// CHECK17-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i32* +// CHECK17-NEXT: store i32 [[TMP0]], i32* [[TMP39]], align 4 +// CHECK17-NEXT: [[TMP40:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 1 +// CHECK17-NEXT: [[TMP41:%.*]] = bitcast i8** [[TMP40]] to i32* +// CHECK17-NEXT: store i32 [[TMP0]], i32* [[TMP41]], align 4 +// CHECK17-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1 // CHECK17-NEXT: store i8* null, i8** [[TMP42]], align 4 -// CHECK17-NEXT: [[TMP43:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1 -// CHECK17-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32* -// CHECK17-NEXT: store i32 [[TMP0]], i32* [[TMP44]], align 4 -// CHECK17-NEXT: [[TMP45:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 1 -// CHECK17-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32* -// CHECK17-NEXT: store i32 [[TMP0]], i32* [[TMP46]], align 4 -// CHECK17-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 1 -// CHECK17-NEXT: store i64 4, i64* [[TMP47]], align 4 -// CHECK17-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1 -// CHECK17-NEXT: store i8* null, i8** [[TMP48]], align 4 -// CHECK17-NEXT: [[TMP49:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2 -// CHECK17-NEXT: [[TMP50:%.*]] = bitcast i8** [[TMP49]] to i32** -// CHECK17-NEXT: store i32* [[VLA]], i32** [[TMP50]], align 4 -// CHECK17-NEXT: [[TMP51:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 2 -// CHECK17-NEXT: [[TMP52:%.*]] = bitcast i8** [[TMP51]] to i32** -// CHECK17-NEXT: store i32* [[VLA]], i32** [[TMP52]], align 4 -// CHECK17-NEXT: [[TMP53:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 2 -// CHECK17-NEXT: store i64 [[TMP36]], i64* [[TMP53]], align 4 -// CHECK17-NEXT: [[TMP54:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 2 -// CHECK17-NEXT: store i8* null, i8** [[TMP54]], align 4 -// CHECK17-NEXT: [[TMP55:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP56:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP57:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP43:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2 +// CHECK17-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32** +// CHECK17-NEXT: store i32* [[VLA]], i32** [[TMP44]], align 4 +// CHECK17-NEXT: [[TMP45:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 2 +// CHECK17-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to 
i32** +// CHECK17-NEXT: store i32* [[VLA]], i32** [[TMP46]], align 4 +// CHECK17-NEXT: store i64 [[TMP32]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 2), align 4 +// CHECK17-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 2 +// CHECK17-NEXT: store i8* null, i8** [[TMP47]], align 4 +// CHECK17-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP49:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP50:%.*]] = load i32, i32* [[N]], align 4 +// CHECK17-NEXT: store i32 [[TMP50]], i32* [[DOTCAPTURE_EXPR_8]], align 4 +// CHECK17-NEXT: [[TMP51:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_8]], align 4 +// CHECK17-NEXT: [[SUB10:%.*]] = sub nsw i32 [[TMP51]], 0 +// CHECK17-NEXT: [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1 +// CHECK17-NEXT: [[SUB12:%.*]] = sub nsw i32 [[DIV11]], 1 +// CHECK17-NEXT: store i32 [[SUB12]], i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK17-NEXT: [[TMP52:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK17-NEXT: [[ADD13:%.*]] = add nsw i32 [[TMP52]], 1 +// CHECK17-NEXT: [[TMP53:%.*]] = zext i32 [[ADD13]] to i64 +// CHECK17-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP53]]) +// CHECK17-NEXT: [[TMP54:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159.region_id, i32 3, i8** [[TMP48]], i8** [[TMP49]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK17-NEXT: [[TMP55:%.*]] = icmp ne i32 [[TMP54]], 0 +// CHECK17-NEXT: br i1 [[TMP55]], label [[OMP_OFFLOAD_FAILED14:%.*]], label [[OMP_OFFLOAD_CONT15:%.*]] +// CHECK17: omp_offload.failed14: +// CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159(i32 [[TMP30]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] +// CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT15]] +// CHECK17: omp_offload.cont15: +// CHECK17-NEXT: [[TMP56:%.*]] = load i32, i32* [[M]], align 4 +// CHECK17-NEXT: store i32 [[TMP56]], i32* [[M_CASTED]], align 4 +// CHECK17-NEXT: [[TMP57:%.*]] = load i32, i32* [[M_CASTED]], align 4 // CHECK17-NEXT: [[TMP58:%.*]] = load i32, i32* [[N]], align 4 -// CHECK17-NEXT: store i32 [[TMP58]], i32* [[DOTCAPTURE_EXPR_9]], align 4 -// CHECK17-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 -// CHECK17-NEXT: [[SUB11:%.*]] = sub nsw i32 [[TMP59]], 0 -// CHECK17-NEXT: [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1 -// CHECK17-NEXT: [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1 -// CHECK17-NEXT: store i32 [[SUB13]], i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK17-NEXT: [[TMP60:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK17-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP60]], 1 -// CHECK17-NEXT: [[TMP61:%.*]] = zext i32 [[ADD14]] to i64 -// CHECK17-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP61]]) -// CHECK17-NEXT: [[TMP62:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159.region_id, i32 3, i8** [[TMP55]], i8** [[TMP56]], i64* [[TMP57]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** 
null, i32 0, i32 0) -// CHECK17-NEXT: [[TMP63:%.*]] = icmp ne i32 [[TMP62]], 0 -// CHECK17-NEXT: br i1 [[TMP63]], label [[OMP_OFFLOAD_FAILED15:%.*]], label [[OMP_OFFLOAD_CONT16:%.*]] -// CHECK17: omp_offload.failed15: -// CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159(i32 [[TMP34]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] -// CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT16]] -// CHECK17: omp_offload.cont16: -// CHECK17-NEXT: [[TMP64:%.*]] = load i32, i32* [[M]], align 4 -// CHECK17-NEXT: store i32 [[TMP64]], i32* [[M_CASTED]], align 4 -// CHECK17-NEXT: [[TMP65:%.*]] = load i32, i32* [[M_CASTED]], align 4 -// CHECK17-NEXT: [[TMP66:%.*]] = load i32, i32* [[N]], align 4 -// CHECK17-NEXT: store i32 [[TMP66]], i32* [[N_CASTED17]], align 4 -// CHECK17-NEXT: [[TMP67:%.*]] = load i32, i32* [[N_CASTED17]], align 4 -// CHECK17-NEXT: [[TMP68:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK17-NEXT: [[TMP69:%.*]] = sext i32 [[TMP68]] to i64 -// CHECK17-NEXT: [[TMP70:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP71:%.*]] = bitcast i8** [[TMP70]] to i32* -// CHECK17-NEXT: store i32 [[TMP65]], i32* [[TMP71]], align 4 -// CHECK17-NEXT: [[TMP72:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 0 +// CHECK17-NEXT: store i32 [[TMP58]], i32* [[N_CASTED16]], align 4 +// CHECK17-NEXT: [[TMP59:%.*]] = load i32, i32* [[N_CASTED16]], align 4 +// CHECK17-NEXT: [[TMP60:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK17-NEXT: [[TMP61:%.*]] = sext i32 [[TMP60]] to i64 +// CHECK17-NEXT: [[TMP62:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP63:%.*]] = bitcast i8** [[TMP62]] to i32* +// CHECK17-NEXT: store i32 [[TMP57]], i32* [[TMP63]], align 4 +// CHECK17-NEXT: [[TMP64:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP65:%.*]] = bitcast i8** [[TMP64]] to i32* +// CHECK17-NEXT: store i32 [[TMP57]], i32* [[TMP65]], align 4 +// CHECK17-NEXT: [[TMP66:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 0 +// CHECK17-NEXT: store i8* null, i8** [[TMP66]], align 4 +// CHECK17-NEXT: [[TMP67:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 1 +// CHECK17-NEXT: [[TMP68:%.*]] = bitcast i8** [[TMP67]] to i32* +// CHECK17-NEXT: store i32 [[TMP59]], i32* [[TMP68]], align 4 +// CHECK17-NEXT: [[TMP69:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 1 +// CHECK17-NEXT: [[TMP70:%.*]] = bitcast i8** [[TMP69]] to i32* +// CHECK17-NEXT: store i32 [[TMP59]], i32* [[TMP70]], align 4 +// CHECK17-NEXT: [[TMP71:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 1 +// CHECK17-NEXT: store i8* null, i8** [[TMP71]], align 4 +// CHECK17-NEXT: [[TMP72:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 2 // CHECK17-NEXT: [[TMP73:%.*]] = bitcast i8** [[TMP72]] to i32* -// CHECK17-NEXT: store i32 [[TMP65]], i32* [[TMP73]], align 4 -// CHECK17-NEXT: [[TMP74:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES21]], i32 0, i32 0 -// CHECK17-NEXT: store i64 4, i64* [[TMP74]], align 4 -// CHECK17-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 0 -// CHECK17-NEXT: store i8* null, i8** [[TMP75]], align 4 -// CHECK17-NEXT: [[TMP76:%.*]] = getelementptr 
inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 1 -// CHECK17-NEXT: [[TMP77:%.*]] = bitcast i8** [[TMP76]] to i32* -// CHECK17-NEXT: store i32 [[TMP67]], i32* [[TMP77]], align 4 -// CHECK17-NEXT: [[TMP78:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 1 -// CHECK17-NEXT: [[TMP79:%.*]] = bitcast i8** [[TMP78]] to i32* -// CHECK17-NEXT: store i32 [[TMP67]], i32* [[TMP79]], align 4 -// CHECK17-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES21]], i32 0, i32 1 -// CHECK17-NEXT: store i64 4, i64* [[TMP80]], align 4 -// CHECK17-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 1 +// CHECK17-NEXT: store i32 [[TMP0]], i32* [[TMP73]], align 4 +// CHECK17-NEXT: [[TMP74:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 2 +// CHECK17-NEXT: [[TMP75:%.*]] = bitcast i8** [[TMP74]] to i32* +// CHECK17-NEXT: store i32 [[TMP0]], i32* [[TMP75]], align 4 +// CHECK17-NEXT: [[TMP76:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 2 +// CHECK17-NEXT: store i8* null, i8** [[TMP76]], align 4 +// CHECK17-NEXT: [[TMP77:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 3 +// CHECK17-NEXT: [[TMP78:%.*]] = bitcast i8** [[TMP77]] to i32** +// CHECK17-NEXT: store i32* [[VLA]], i32** [[TMP78]], align 4 +// CHECK17-NEXT: [[TMP79:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 3 +// CHECK17-NEXT: [[TMP80:%.*]] = bitcast i8** [[TMP79]] to i32** +// CHECK17-NEXT: store i32* [[VLA]], i32** [[TMP80]], align 4 +// CHECK17-NEXT: store i64 [[TMP61]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 3), align 4 +// CHECK17-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 3 // CHECK17-NEXT: store i8* null, i8** [[TMP81]], align 4 -// CHECK17-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 2 -// CHECK17-NEXT: [[TMP83:%.*]] = bitcast i8** [[TMP82]] to i32* -// CHECK17-NEXT: store i32 [[TMP0]], i32* [[TMP83]], align 4 -// CHECK17-NEXT: [[TMP84:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 2 -// CHECK17-NEXT: [[TMP85:%.*]] = bitcast i8** [[TMP84]] to i32* -// CHECK17-NEXT: store i32 [[TMP0]], i32* [[TMP85]], align 4 -// CHECK17-NEXT: [[TMP86:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES21]], i32 0, i32 2 -// CHECK17-NEXT: store i64 4, i64* [[TMP86]], align 4 -// CHECK17-NEXT: [[TMP87:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 2 -// CHECK17-NEXT: store i8* null, i8** [[TMP87]], align 4 -// CHECK17-NEXT: [[TMP88:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 3 -// CHECK17-NEXT: [[TMP89:%.*]] = bitcast i8** [[TMP88]] to i32** -// CHECK17-NEXT: store i32* [[VLA]], i32** [[TMP89]], align 4 -// CHECK17-NEXT: [[TMP90:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 3 -// CHECK17-NEXT: [[TMP91:%.*]] = bitcast i8** [[TMP90]] to i32** -// CHECK17-NEXT: store i32* [[VLA]], i32** [[TMP91]], align 4 -// CHECK17-NEXT: [[TMP92:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES21]], i32 0, i32 3 -// CHECK17-NEXT: store i64 [[TMP69]], i64* [[TMP92]], align 4 -// CHECK17-NEXT: [[TMP93:%.*]] = 
getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 3 -// CHECK17-NEXT: store i8* null, i8** [[TMP93]], align 4 -// CHECK17-NEXT: [[TMP94:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP95:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP96:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES21]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP97:%.*]] = load i32, i32* [[N]], align 4 -// CHECK17-NEXT: store i32 [[TMP97]], i32* [[DOTCAPTURE_EXPR_23]], align 4 -// CHECK17-NEXT: [[TMP98:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_23]], align 4 -// CHECK17-NEXT: [[SUB25:%.*]] = sub nsw i32 [[TMP98]], 0 -// CHECK17-NEXT: [[DIV26:%.*]] = sdiv i32 [[SUB25]], 1 -// CHECK17-NEXT: [[SUB27:%.*]] = sub nsw i32 [[DIV26]], 1 -// CHECK17-NEXT: store i32 [[SUB27]], i32* [[DOTCAPTURE_EXPR_24]], align 4 -// CHECK17-NEXT: [[TMP99:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_24]], align 4 -// CHECK17-NEXT: [[ADD28:%.*]] = add nsw i32 [[TMP99]], 1 -// CHECK17-NEXT: [[TMP100:%.*]] = zext i32 [[ADD28]] to i64 -// CHECK17-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP100]]) -// CHECK17-NEXT: [[TMP101:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l164.region_id, i32 4, i8** [[TMP94]], i8** [[TMP95]], i64* [[TMP96]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.7, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK17-NEXT: [[TMP102:%.*]] = icmp ne i32 [[TMP101]], 0 -// CHECK17-NEXT: br i1 [[TMP102]], label [[OMP_OFFLOAD_FAILED29:%.*]], label [[OMP_OFFLOAD_CONT30:%.*]] -// CHECK17: omp_offload.failed29: -// CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l164(i32 [[TMP65]], i32 [[TMP67]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] -// CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT30]] -// CHECK17: omp_offload.cont30: -// CHECK17-NEXT: [[TMP103:%.*]] = load i32, i32* [[N]], align 4 -// CHECK17-NEXT: store i32 [[TMP103]], i32* [[N_CASTED31]], align 4 -// CHECK17-NEXT: [[TMP104:%.*]] = load i32, i32* [[N_CASTED31]], align 4 -// CHECK17-NEXT: [[TMP105:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK17-NEXT: [[TMP106:%.*]] = sext i32 [[TMP105]] to i64 -// CHECK17-NEXT: [[TMP107:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS32]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP108:%.*]] = bitcast i8** [[TMP107]] to i32* -// CHECK17-NEXT: store i32 [[TMP104]], i32* [[TMP108]], align 4 -// CHECK17-NEXT: [[TMP109:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS33]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP110:%.*]] = bitcast i8** [[TMP109]] to i32* -// CHECK17-NEXT: store i32 [[TMP104]], i32* [[TMP110]], align 4 -// CHECK17-NEXT: [[TMP111:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES35]], i32 0, i32 0 -// CHECK17-NEXT: store i64 4, i64* [[TMP111]], align 4 -// CHECK17-NEXT: [[TMP112:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS34]], i32 0, i32 0 -// CHECK17-NEXT: store i8* null, i8** [[TMP112]], align 4 -// CHECK17-NEXT: [[TMP113:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS32]], i32 0, i32 1 -// CHECK17-NEXT: [[TMP114:%.*]] = bitcast i8** [[TMP113]] to i32* -// CHECK17-NEXT: store i32 [[TMP0]], i32* [[TMP114]], align 4 -// CHECK17-NEXT: [[TMP115:%.*]] = getelementptr 
inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS33]], i32 0, i32 1 -// CHECK17-NEXT: [[TMP116:%.*]] = bitcast i8** [[TMP115]] to i32* -// CHECK17-NEXT: store i32 [[TMP0]], i32* [[TMP116]], align 4 -// CHECK17-NEXT: [[TMP117:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES35]], i32 0, i32 1 -// CHECK17-NEXT: store i64 4, i64* [[TMP117]], align 4 -// CHECK17-NEXT: [[TMP118:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS34]], i32 0, i32 1 -// CHECK17-NEXT: store i8* null, i8** [[TMP118]], align 4 -// CHECK17-NEXT: [[TMP119:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS32]], i32 0, i32 2 -// CHECK17-NEXT: [[TMP120:%.*]] = bitcast i8** [[TMP119]] to i32** -// CHECK17-NEXT: store i32* [[VLA]], i32** [[TMP120]], align 4 -// CHECK17-NEXT: [[TMP121:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS33]], i32 0, i32 2 -// CHECK17-NEXT: [[TMP122:%.*]] = bitcast i8** [[TMP121]] to i32** -// CHECK17-NEXT: store i32* [[VLA]], i32** [[TMP122]], align 4 -// CHECK17-NEXT: [[TMP123:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES35]], i32 0, i32 2 -// CHECK17-NEXT: store i64 [[TMP106]], i64* [[TMP123]], align 4 -// CHECK17-NEXT: [[TMP124:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS34]], i32 0, i32 2 -// CHECK17-NEXT: store i8* null, i8** [[TMP124]], align 4 -// CHECK17-NEXT: [[TMP125:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS32]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP126:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS33]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP127:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES35]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP128:%.*]] = load i32, i32* [[N]], align 4 -// CHECK17-NEXT: store i32 [[TMP128]], i32* [[DOTCAPTURE_EXPR_37]], align 4 -// CHECK17-NEXT: [[TMP129:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_37]], align 4 -// CHECK17-NEXT: [[SUB39:%.*]] = sub nsw i32 [[TMP129]], 0 -// CHECK17-NEXT: [[DIV40:%.*]] = sdiv i32 [[SUB39]], 1 -// CHECK17-NEXT: [[SUB41:%.*]] = sub nsw i32 [[DIV40]], 1 -// CHECK17-NEXT: store i32 [[SUB41]], i32* [[DOTCAPTURE_EXPR_38]], align 4 -// CHECK17-NEXT: [[TMP130:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_38]], align 4 -// CHECK17-NEXT: [[ADD42:%.*]] = add nsw i32 [[TMP130]], 1 -// CHECK17-NEXT: [[TMP131:%.*]] = zext i32 [[ADD42]] to i64 -// CHECK17-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP131]]) -// CHECK17-NEXT: [[TMP132:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l169.region_id, i32 3, i8** [[TMP125]], i8** [[TMP126]], i64* [[TMP127]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK17-NEXT: [[TMP133:%.*]] = icmp ne i32 [[TMP132]], 0 -// CHECK17-NEXT: br i1 [[TMP133]], label [[OMP_OFFLOAD_FAILED43:%.*]], label [[OMP_OFFLOAD_CONT44:%.*]] -// CHECK17: omp_offload.failed43: -// CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l169(i32 [[TMP104]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] -// CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT44]] -// CHECK17: omp_offload.cont44: -// CHECK17-NEXT: [[TMP134:%.*]] = load i32, i32* [[M]], align 4 -// CHECK17-NEXT: store i32 [[TMP134]], i32* [[M_CASTED45]], align 4 -// CHECK17-NEXT: [[TMP135:%.*]] = load i32, i32* [[M_CASTED45]], align 4 -// 
CHECK17-NEXT: [[TMP136:%.*]] = load i32, i32* [[N]], align 4 -// CHECK17-NEXT: store i32 [[TMP136]], i32* [[N_CASTED46]], align 4 -// CHECK17-NEXT: [[TMP137:%.*]] = load i32, i32* [[N_CASTED46]], align 4 -// CHECK17-NEXT: [[TMP138:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK17-NEXT: [[TMP139:%.*]] = sext i32 [[TMP138]] to i64 -// CHECK17-NEXT: [[TMP140:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS47]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP141:%.*]] = bitcast i8** [[TMP140]] to i32* -// CHECK17-NEXT: store i32 [[TMP135]], i32* [[TMP141]], align 4 -// CHECK17-NEXT: [[TMP142:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS48]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP143:%.*]] = bitcast i8** [[TMP142]] to i32* -// CHECK17-NEXT: store i32 [[TMP135]], i32* [[TMP143]], align 4 -// CHECK17-NEXT: [[TMP144:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES50]], i32 0, i32 0 -// CHECK17-NEXT: store i64 4, i64* [[TMP144]], align 4 -// CHECK17-NEXT: [[TMP145:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS49]], i32 0, i32 0 -// CHECK17-NEXT: store i8* null, i8** [[TMP145]], align 4 -// CHECK17-NEXT: [[TMP146:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS47]], i32 0, i32 1 -// CHECK17-NEXT: [[TMP147:%.*]] = bitcast i8** [[TMP146]] to i32* -// CHECK17-NEXT: store i32 [[TMP137]], i32* [[TMP147]], align 4 -// CHECK17-NEXT: [[TMP148:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS48]], i32 0, i32 1 -// CHECK17-NEXT: [[TMP149:%.*]] = bitcast i8** [[TMP148]] to i32* -// CHECK17-NEXT: store i32 [[TMP137]], i32* [[TMP149]], align 4 -// CHECK17-NEXT: [[TMP150:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES50]], i32 0, i32 1 -// CHECK17-NEXT: store i64 4, i64* [[TMP150]], align 4 -// CHECK17-NEXT: [[TMP151:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS49]], i32 0, i32 1 -// CHECK17-NEXT: store i8* null, i8** [[TMP151]], align 4 -// CHECK17-NEXT: [[TMP152:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS47]], i32 0, i32 2 -// CHECK17-NEXT: [[TMP153:%.*]] = bitcast i8** [[TMP152]] to i32* -// CHECK17-NEXT: store i32 [[TMP0]], i32* [[TMP153]], align 4 -// CHECK17-NEXT: [[TMP154:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS48]], i32 0, i32 2 -// CHECK17-NEXT: [[TMP155:%.*]] = bitcast i8** [[TMP154]] to i32* -// CHECK17-NEXT: store i32 [[TMP0]], i32* [[TMP155]], align 4 -// CHECK17-NEXT: [[TMP156:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES50]], i32 0, i32 2 -// CHECK17-NEXT: store i64 4, i64* [[TMP156]], align 4 -// CHECK17-NEXT: [[TMP157:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS49]], i32 0, i32 2 -// CHECK17-NEXT: store i8* null, i8** [[TMP157]], align 4 -// CHECK17-NEXT: [[TMP158:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS47]], i32 0, i32 3 -// CHECK17-NEXT: [[TMP159:%.*]] = bitcast i8** [[TMP158]] to i32** -// CHECK17-NEXT: store i32* [[VLA]], i32** [[TMP159]], align 4 -// CHECK17-NEXT: [[TMP160:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS48]], i32 0, i32 3 -// CHECK17-NEXT: [[TMP161:%.*]] = bitcast i8** [[TMP160]] to i32** -// CHECK17-NEXT: store i32* [[VLA]], i32** [[TMP161]], align 4 -// CHECK17-NEXT: [[TMP162:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES50]], i32 0, i32 3 -// CHECK17-NEXT: store i64 [[TMP139]], i64* [[TMP162]], align 
4 -// CHECK17-NEXT: [[TMP163:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS49]], i32 0, i32 3 -// CHECK17-NEXT: store i8* null, i8** [[TMP163]], align 4 -// CHECK17-NEXT: [[TMP164:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS47]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP165:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS48]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP166:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES50]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP167:%.*]] = load i32, i32* [[N]], align 4 -// CHECK17-NEXT: store i32 [[TMP167]], i32* [[DOTCAPTURE_EXPR_52]], align 4 -// CHECK17-NEXT: [[TMP168:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_52]], align 4 -// CHECK17-NEXT: [[SUB54:%.*]] = sub nsw i32 [[TMP168]], 0 -// CHECK17-NEXT: [[DIV55:%.*]] = sdiv i32 [[SUB54]], 1 -// CHECK17-NEXT: [[SUB56:%.*]] = sub nsw i32 [[DIV55]], 1 -// CHECK17-NEXT: store i32 [[SUB56]], i32* [[DOTCAPTURE_EXPR_53]], align 4 -// CHECK17-NEXT: [[TMP169:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_53]], align 4 -// CHECK17-NEXT: [[ADD57:%.*]] = add nsw i32 [[TMP169]], 1 -// CHECK17-NEXT: [[TMP170:%.*]] = zext i32 [[ADD57]] to i64 -// CHECK17-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP170]]) -// CHECK17-NEXT: [[TMP171:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l174.region_id, i32 4, i8** [[TMP164]], i8** [[TMP165]], i64* [[TMP166]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK17-NEXT: [[TMP172:%.*]] = icmp ne i32 [[TMP171]], 0 -// CHECK17-NEXT: br i1 [[TMP172]], label [[OMP_OFFLOAD_FAILED58:%.*]], label [[OMP_OFFLOAD_CONT59:%.*]] -// CHECK17: omp_offload.failed58: -// CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l174(i32 [[TMP135]], i32 [[TMP137]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] -// CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT59]] -// CHECK17: omp_offload.cont59: -// CHECK17-NEXT: [[TMP173:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 -// CHECK17-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP173]]) +// CHECK17-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP83:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP84:%.*]] = load i32, i32* [[N]], align 4 +// CHECK17-NEXT: store i32 [[TMP84]], i32* [[DOTCAPTURE_EXPR_21]], align 4 +// CHECK17-NEXT: [[TMP85:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_21]], align 4 +// CHECK17-NEXT: [[SUB23:%.*]] = sub nsw i32 [[TMP85]], 0 +// CHECK17-NEXT: [[DIV24:%.*]] = sdiv i32 [[SUB23]], 1 +// CHECK17-NEXT: [[SUB25:%.*]] = sub nsw i32 [[DIV24]], 1 +// CHECK17-NEXT: store i32 [[SUB25]], i32* [[DOTCAPTURE_EXPR_22]], align 4 +// CHECK17-NEXT: [[TMP86:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_22]], align 4 +// CHECK17-NEXT: [[ADD26:%.*]] = add nsw i32 [[TMP86]], 1 +// CHECK17-NEXT: [[TMP87:%.*]] = zext i32 [[ADD26]] to i64 +// CHECK17-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP87]]) +// CHECK17-NEXT: [[TMP88:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l164.region_id, i32 4, i8** [[TMP82]], i8** [[TMP83]], i64* getelementptr inbounds 
([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK17-NEXT: [[TMP89:%.*]] = icmp ne i32 [[TMP88]], 0 +// CHECK17-NEXT: br i1 [[TMP89]], label [[OMP_OFFLOAD_FAILED27:%.*]], label [[OMP_OFFLOAD_CONT28:%.*]] +// CHECK17: omp_offload.failed27: +// CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l164(i32 [[TMP57]], i32 [[TMP59]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] +// CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT28]] +// CHECK17: omp_offload.cont28: +// CHECK17-NEXT: [[TMP90:%.*]] = load i32, i32* [[N]], align 4 +// CHECK17-NEXT: store i32 [[TMP90]], i32* [[N_CASTED29]], align 4 +// CHECK17-NEXT: [[TMP91:%.*]] = load i32, i32* [[N_CASTED29]], align 4 +// CHECK17-NEXT: [[TMP92:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK17-NEXT: [[TMP93:%.*]] = sext i32 [[TMP92]] to i64 +// CHECK17-NEXT: [[TMP94:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS30]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP95:%.*]] = bitcast i8** [[TMP94]] to i32* +// CHECK17-NEXT: store i32 [[TMP91]], i32* [[TMP95]], align 4 +// CHECK17-NEXT: [[TMP96:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS31]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP97:%.*]] = bitcast i8** [[TMP96]] to i32* +// CHECK17-NEXT: store i32 [[TMP91]], i32* [[TMP97]], align 4 +// CHECK17-NEXT: [[TMP98:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS32]], i32 0, i32 0 +// CHECK17-NEXT: store i8* null, i8** [[TMP98]], align 4 +// CHECK17-NEXT: [[TMP99:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS30]], i32 0, i32 1 +// CHECK17-NEXT: [[TMP100:%.*]] = bitcast i8** [[TMP99]] to i32* +// CHECK17-NEXT: store i32 [[TMP0]], i32* [[TMP100]], align 4 +// CHECK17-NEXT: [[TMP101:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS31]], i32 0, i32 1 +// CHECK17-NEXT: [[TMP102:%.*]] = bitcast i8** [[TMP101]] to i32* +// CHECK17-NEXT: store i32 [[TMP0]], i32* [[TMP102]], align 4 +// CHECK17-NEXT: [[TMP103:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS32]], i32 0, i32 1 +// CHECK17-NEXT: store i8* null, i8** [[TMP103]], align 4 +// CHECK17-NEXT: [[TMP104:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS30]], i32 0, i32 2 +// CHECK17-NEXT: [[TMP105:%.*]] = bitcast i8** [[TMP104]] to i32** +// CHECK17-NEXT: store i32* [[VLA]], i32** [[TMP105]], align 4 +// CHECK17-NEXT: [[TMP106:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS31]], i32 0, i32 2 +// CHECK17-NEXT: [[TMP107:%.*]] = bitcast i8** [[TMP106]] to i32** +// CHECK17-NEXT: store i32* [[VLA]], i32** [[TMP107]], align 4 +// CHECK17-NEXT: store i64 [[TMP93]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.12, i32 0, i32 2), align 4 +// CHECK17-NEXT: [[TMP108:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS32]], i32 0, i32 2 +// CHECK17-NEXT: store i8* null, i8** [[TMP108]], align 4 +// CHECK17-NEXT: [[TMP109:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS30]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP110:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS31]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP111:%.*]] = load i32, i32* [[N]], align 4 +// CHECK17-NEXT: store i32 [[TMP111]], i32* [[DOTCAPTURE_EXPR_34]], align 4 +// CHECK17-NEXT: [[TMP112:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_34]], align 4 +// 
CHECK17-NEXT: [[SUB36:%.*]] = sub nsw i32 [[TMP112]], 0 +// CHECK17-NEXT: [[DIV37:%.*]] = sdiv i32 [[SUB36]], 1 +// CHECK17-NEXT: [[SUB38:%.*]] = sub nsw i32 [[DIV37]], 1 +// CHECK17-NEXT: store i32 [[SUB38]], i32* [[DOTCAPTURE_EXPR_35]], align 4 +// CHECK17-NEXT: [[TMP113:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_35]], align 4 +// CHECK17-NEXT: [[ADD39:%.*]] = add nsw i32 [[TMP113]], 1 +// CHECK17-NEXT: [[TMP114:%.*]] = zext i32 [[ADD39]] to i64 +// CHECK17-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP114]]) +// CHECK17-NEXT: [[TMP115:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l169.region_id, i32 3, i8** [[TMP109]], i8** [[TMP110]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK17-NEXT: [[TMP116:%.*]] = icmp ne i32 [[TMP115]], 0 +// CHECK17-NEXT: br i1 [[TMP116]], label [[OMP_OFFLOAD_FAILED40:%.*]], label [[OMP_OFFLOAD_CONT41:%.*]] +// CHECK17: omp_offload.failed40: +// CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l169(i32 [[TMP91]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] +// CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT41]] +// CHECK17: omp_offload.cont41: +// CHECK17-NEXT: [[TMP117:%.*]] = load i32, i32* [[M]], align 4 +// CHECK17-NEXT: store i32 [[TMP117]], i32* [[M_CASTED42]], align 4 +// CHECK17-NEXT: [[TMP118:%.*]] = load i32, i32* [[M_CASTED42]], align 4 +// CHECK17-NEXT: [[TMP119:%.*]] = load i32, i32* [[N]], align 4 +// CHECK17-NEXT: store i32 [[TMP119]], i32* [[N_CASTED43]], align 4 +// CHECK17-NEXT: [[TMP120:%.*]] = load i32, i32* [[N_CASTED43]], align 4 +// CHECK17-NEXT: [[TMP121:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK17-NEXT: [[TMP122:%.*]] = sext i32 [[TMP121]] to i64 +// CHECK17-NEXT: [[TMP123:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP124:%.*]] = bitcast i8** [[TMP123]] to i32* +// CHECK17-NEXT: store i32 [[TMP118]], i32* [[TMP124]], align 4 +// CHECK17-NEXT: [[TMP125:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP126:%.*]] = bitcast i8** [[TMP125]] to i32* +// CHECK17-NEXT: store i32 [[TMP118]], i32* [[TMP126]], align 4 +// CHECK17-NEXT: [[TMP127:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS46]], i32 0, i32 0 +// CHECK17-NEXT: store i8* null, i8** [[TMP127]], align 4 +// CHECK17-NEXT: [[TMP128:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 1 +// CHECK17-NEXT: [[TMP129:%.*]] = bitcast i8** [[TMP128]] to i32* +// CHECK17-NEXT: store i32 [[TMP120]], i32* [[TMP129]], align 4 +// CHECK17-NEXT: [[TMP130:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 1 +// CHECK17-NEXT: [[TMP131:%.*]] = bitcast i8** [[TMP130]] to i32* +// CHECK17-NEXT: store i32 [[TMP120]], i32* [[TMP131]], align 4 +// CHECK17-NEXT: [[TMP132:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS46]], i32 0, i32 1 +// CHECK17-NEXT: store i8* null, i8** [[TMP132]], align 4 +// CHECK17-NEXT: [[TMP133:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 2 +// CHECK17-NEXT: [[TMP134:%.*]] = bitcast i8** [[TMP133]] to i32* +// CHECK17-NEXT: store i32 [[TMP0]], i32* [[TMP134]], align 
4 +// CHECK17-NEXT: [[TMP135:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 2 +// CHECK17-NEXT: [[TMP136:%.*]] = bitcast i8** [[TMP135]] to i32* +// CHECK17-NEXT: store i32 [[TMP0]], i32* [[TMP136]], align 4 +// CHECK17-NEXT: [[TMP137:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS46]], i32 0, i32 2 +// CHECK17-NEXT: store i8* null, i8** [[TMP137]], align 4 +// CHECK17-NEXT: [[TMP138:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 3 +// CHECK17-NEXT: [[TMP139:%.*]] = bitcast i8** [[TMP138]] to i32** +// CHECK17-NEXT: store i32* [[VLA]], i32** [[TMP139]], align 4 +// CHECK17-NEXT: [[TMP140:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 3 +// CHECK17-NEXT: [[TMP141:%.*]] = bitcast i8** [[TMP140]] to i32** +// CHECK17-NEXT: store i32* [[VLA]], i32** [[TMP141]], align 4 +// CHECK17-NEXT: store i64 [[TMP122]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.16, i32 0, i32 3), align 4 +// CHECK17-NEXT: [[TMP142:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS46]], i32 0, i32 3 +// CHECK17-NEXT: store i8* null, i8** [[TMP142]], align 4 +// CHECK17-NEXT: [[TMP143:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP144:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP145:%.*]] = load i32, i32* [[N]], align 4 +// CHECK17-NEXT: store i32 [[TMP145]], i32* [[DOTCAPTURE_EXPR_48]], align 4 +// CHECK17-NEXT: [[TMP146:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_48]], align 4 +// CHECK17-NEXT: [[SUB50:%.*]] = sub nsw i32 [[TMP146]], 0 +// CHECK17-NEXT: [[DIV51:%.*]] = sdiv i32 [[SUB50]], 1 +// CHECK17-NEXT: [[SUB52:%.*]] = sub nsw i32 [[DIV51]], 1 +// CHECK17-NEXT: store i32 [[SUB52]], i32* [[DOTCAPTURE_EXPR_49]], align 4 +// CHECK17-NEXT: [[TMP147:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_49]], align 4 +// CHECK17-NEXT: [[ADD53:%.*]] = add nsw i32 [[TMP147]], 1 +// CHECK17-NEXT: [[TMP148:%.*]] = zext i32 [[ADD53]] to i64 +// CHECK17-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP148]]) +// CHECK17-NEXT: [[TMP149:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l174.region_id, i32 4, i8** [[TMP143]], i8** [[TMP144]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.16, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK17-NEXT: [[TMP150:%.*]] = icmp ne i32 [[TMP149]], 0 +// CHECK17-NEXT: br i1 [[TMP150]], label [[OMP_OFFLOAD_FAILED54:%.*]], label [[OMP_OFFLOAD_CONT55:%.*]] +// CHECK17: omp_offload.failed54: +// CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l174(i32 [[TMP118]], i32 [[TMP120]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] +// CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT55]] +// CHECK17: omp_offload.cont55: +// CHECK17-NEXT: [[TMP151:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 +// CHECK17-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP151]]) // CHECK17-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK17-NEXT: [[TMP174:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK17-NEXT: call void @llvm.stackrestore(i8* [[TMP174]]) -// CHECK17-NEXT: [[TMP175:%.*]] = load i32, i32* 
[[RETVAL]], align 4 -// CHECK17-NEXT: ret i32 [[TMP175]] +// CHECK17-NEXT: [[TMP152:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK17-NEXT: call void @llvm.stackrestore(i8* [[TMP152]]) +// CHECK17-NEXT: [[TMP153:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK17-NEXT: ret i32 [[TMP153]] // // // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l154 @@ -20245,11 +20050,11 @@ // CHECK17-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 // CHECK17-NEXT: store i32 [[TMP3]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK17-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32, i32*, i32)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP4]]) +// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32, i32*, i32)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP4]]) // CHECK17-NEXT: ret void // // -// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..5 +// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..6 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -20327,7 +20132,7 @@ // CHECK17-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4, !llvm.access.group !29 // CHECK17-NEXT: store i32 [[TMP19]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !29 // CHECK17-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !29 -// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32, i32*, i32)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], i32 [[TMP1]], i32* [[TMP2]], i32 [[TMP20]]), !llvm.access.group !29 +// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32, i32*, i32)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], i32 [[TMP1]], i32* [[TMP2]], i32 [[TMP20]]), !llvm.access.group !29 // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK17: omp.inner.for.inc: // CHECK17-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29 @@ -20381,7 +20186,7 @@ // CHECK17-NEXT: ret void // // -// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..6 +// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..7 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -20508,11 +20313,11 @@ // CHECK17-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4 // CHECK17-NEXT: [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4 // CHECK17-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4 -// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32, i32*)* @.omp_outlined..8 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32 [[TMP0]], i32* [[TMP1]]) +// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32, i32*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32 [[TMP0]], i32* [[TMP1]]) // CHECK17-NEXT: ret void // // -// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..8 +// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..10 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -20582,7 +20387,7 @@ // CHECK17: omp.inner.for.body: // CHECK17-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !35 // CHECK17-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !35 -// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32, i32*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), i32 [[TMP16]], i32 [[TMP17]], i32* [[TMP0]], i32 [[TMP1]], i32* [[TMP2]]), !llvm.access.group !35 +// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32, i32*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP16]], i32 [[TMP17]], i32* [[TMP0]], i32 [[TMP1]], i32* [[TMP2]]), !llvm.access.group !35 // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK17: omp.inner.for.inc: // CHECK17-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35 @@ -20613,7 +20418,7 @@ // CHECK17-NEXT: ret void // // -// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..9 +// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..11 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -20743,11 +20548,11 @@ // CHECK17-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 // CHECK17-NEXT: store i32 [[TMP3]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK17-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32, i32*, i32)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP4]]) +// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32, i32*, i32)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP4]]) // CHECK17-NEXT: ret void // // -// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..14 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -20823,7 +20628,7 @@ // CHECK17-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4, !llvm.access.group !41 // CHECK17-NEXT: store i32 [[TMP18]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !41 // CHECK17-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !41 -// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32, i32*, i32)* @.omp_outlined..12 to void (i32*, i32*, ...)*), i32 [[TMP16]], i32 [[TMP17]], i32* [[TMP0]], i32 [[TMP1]], i32* [[TMP2]], i32 [[TMP19]]), !llvm.access.group !41 +// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32, i32*, i32)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i32 [[TMP16]], i32 [[TMP17]], i32* [[TMP0]], i32 [[TMP1]], i32* [[TMP2]], i32 [[TMP19]]), !llvm.access.group !41 // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK17: omp.inner.for.inc: // CHECK17-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !41 @@ -20854,7 +20659,7 @@ // CHECK17-NEXT: ret void // // -// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..12 +// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..15 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -21008,7 +20813,7 @@ // CHECK17-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK17-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK17-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK17-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l122.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK17-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l122.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.20, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.21, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK17-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK17-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK17: omp_offload.failed: @@ -21026,7 +20831,7 @@ // CHECK17-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0 // CHECK17-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0 // CHECK17-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK17-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l127.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.19, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.20, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK17-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l127.region_id, i32 1, i8** [[TMP14]], i8** 
[[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.24, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.25, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK17-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 // CHECK17-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]] // CHECK17: omp_offload.failed5: @@ -21055,7 +20860,7 @@ // CHECK17-NEXT: [[TMP30:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0 // CHECK17-NEXT: [[TMP31:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0 // CHECK17-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK17-NEXT: [[TMP32:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l132.region_id, i32 2, i8** [[TMP30]], i8** [[TMP31]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.23, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.24, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK17-NEXT: [[TMP32:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l132.region_id, i32 2, i8** [[TMP30]], i8** [[TMP31]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.28, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.29, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK17-NEXT: [[TMP33:%.*]] = icmp ne i32 [[TMP32]], 0 // CHECK17-NEXT: br i1 [[TMP33]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]] // CHECK17: omp_offload.failed11: @@ -21073,7 +20878,7 @@ // CHECK17-NEXT: [[TMP39:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 0 // CHECK17-NEXT: [[TMP40:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 0 // CHECK17-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK17-NEXT: [[TMP41:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l137.region_id, i32 1, i8** [[TMP39]], i8** [[TMP40]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.27, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.28, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK17-NEXT: [[TMP41:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l137.region_id, i32 1, i8** [[TMP39]], i8** [[TMP40]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.32, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.33, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK17-NEXT: [[TMP42:%.*]] = icmp ne i32 [[TMP41]], 0 // CHECK17-NEXT: br i1 [[TMP42]], label [[OMP_OFFLOAD_FAILED17:%.*]], label [[OMP_OFFLOAD_CONT18:%.*]] // CHECK17: omp_offload.failed17: @@ -21102,7 +20907,7 @@ // CHECK17-NEXT: [[TMP55:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0 // CHECK17-NEXT: [[TMP56:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0 // 
CHECK17-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK17-NEXT: [[TMP57:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l142.region_id, i32 2, i8** [[TMP55]], i8** [[TMP56]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.31, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.32, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK17-NEXT: [[TMP57:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l142.region_id, i32 2, i8** [[TMP55]], i8** [[TMP56]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.36, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.37, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK17-NEXT: [[TMP58:%.*]] = icmp ne i32 [[TMP57]], 0 // CHECK17-NEXT: br i1 [[TMP58]], label [[OMP_OFFLOAD_FAILED24:%.*]], label [[OMP_OFFLOAD_CONT25:%.*]] // CHECK17: omp_offload.failed24: @@ -21118,11 +20923,11 @@ // CHECK17-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK17-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 // CHECK17-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 -// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK17-NEXT: ret void // // -// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..14 +// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..18 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -21168,7 +20973,7 @@ // CHECK17: omp.inner.for.body: // CHECK17-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !47 // CHECK17-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !47 -// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]), !llvm.access.group !47 +// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..19 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]), !llvm.access.group !47 // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK17: omp.inner.for.inc: // CHECK17-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47 @@ -21190,7 +20995,7 @@ // CHECK17-NEXT: ret void // // -// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..15 +// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..19 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -21277,11 +21082,11 @@ // CHECK17-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK17-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 // CHECK17-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 -// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..17 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..22 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK17-NEXT: ret void // // -// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..17 +// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..22 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -21327,7 +21132,7 @@ // CHECK17: omp.inner.for.body: // CHECK17-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !53 // CHECK17-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !53 -// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]), !llvm.access.group !53 +// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..23 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]), !llvm.access.group !53 // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK17: omp.inner.for.inc: // CHECK17-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !53 @@ -21349,7 +21154,7 @@ // CHECK17-NEXT: ret void // // -// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..18 +// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..23 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -21445,11 +21250,11 @@ // CHECK17-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 // CHECK17-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK17-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..21 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP3]]) +// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..26 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP3]]) // CHECK17-NEXT: ret void // // -// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..21 +// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..26 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -21501,7 +21306,7 @@ // CHECK17-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4, !llvm.access.group !59 // CHECK17-NEXT: store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !59 // CHECK17-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !59 -// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..22 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]]), !llvm.access.group !59 +// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..27 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]]), !llvm.access.group !59 // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK17: omp.inner.for.inc: // CHECK17-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !59 @@ -21523,7 +21328,7 @@ // CHECK17-NEXT: ret void // // -// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..22 +// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..27 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -21632,11 +21437,11 @@ // CHECK17-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK17-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 // CHECK17-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 -// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..25 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..30 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK17-NEXT: ret void // // -// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..25 +// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..30 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -21682,7 +21487,7 @@ // CHECK17: omp.inner.for.body: // CHECK17-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !65 // CHECK17-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !65 -// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..26 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]), !llvm.access.group !65 +// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..31 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]), !llvm.access.group !65 // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK17: omp.inner.for.inc: // CHECK17-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !65 @@ -21704,7 +21509,7 @@ // CHECK17-NEXT: ret void // // -// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..26 +// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..31 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -21798,11 +21603,11 @@ // CHECK17-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 // CHECK17-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK17-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..29 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP3]]) +// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..34 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP3]]) // CHECK17-NEXT: ret void // // -// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..29 +// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..34 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -21854,7 +21659,7 @@ // CHECK17-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4, !llvm.access.group !71 // CHECK17-NEXT: store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !71 // CHECK17-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !71 -// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..30 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]]), !llvm.access.group !71 +// CHECK17-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..35 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]]), !llvm.access.group !71 // CHECK17-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK17: omp.inner.for.inc: // CHECK17-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !71 @@ -21876,7 +21681,7 @@ // CHECK17-NEXT: ret void // // -// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..30 +// CHECK17-LABEL: define {{[^@]+}}@.omp_outlined..35 // CHECK17-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK17-NEXT: entry: // CHECK17-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -21979,7 +21784,6 @@ // CHECK18-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK18-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK18-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4 -// CHECK18-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 4 // CHECK18-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK18-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK18-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -21987,36 +21791,32 @@ // CHECK18-NEXT: [[DOTOFFLOAD_BASEPTRS4:%.*]] = alloca [3 x i8*], align 4 // CHECK18-NEXT: [[DOTOFFLOAD_PTRS5:%.*]] = alloca [3 x i8*], align 4 // CHECK18-NEXT: [[DOTOFFLOAD_MAPPERS6:%.*]] = alloca [3 x i8*], align 4 -// CHECK18-NEXT: [[DOTOFFLOAD_SIZES7:%.*]] = alloca [3 x i64], align 4 -// CHECK18-NEXT: [[_TMP8:%.*]] = alloca i32, align 4 +// CHECK18-NEXT: [[_TMP7:%.*]] = alloca i32, align 4 +// CHECK18-NEXT: [[DOTCAPTURE_EXPR_8:%.*]] = alloca i32, align 4 // CHECK18-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4 -// CHECK18-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4 // CHECK18-NEXT: [[M_CASTED:%.*]] = alloca i32, align 4 -// CHECK18-NEXT: [[N_CASTED17:%.*]] = alloca i32, align 4 -// CHECK18-NEXT: [[DOTOFFLOAD_BASEPTRS18:%.*]] = alloca [4 x i8*], align 4 -// CHECK18-NEXT: [[DOTOFFLOAD_PTRS19:%.*]] = alloca [4 x i8*], align 4 -// CHECK18-NEXT: [[DOTOFFLOAD_MAPPERS20:%.*]] = alloca [4 x i8*], align 4 -// CHECK18-NEXT: [[DOTOFFLOAD_SIZES21:%.*]] = alloca [4 x i64], align 4 -// CHECK18-NEXT: [[_TMP22:%.*]] = alloca i32, align 4 -// CHECK18-NEXT: [[DOTCAPTURE_EXPR_23:%.*]] = alloca i32, align 4 -// CHECK18-NEXT: [[DOTCAPTURE_EXPR_24:%.*]] = alloca i32, align 4 -// CHECK18-NEXT: [[N_CASTED31:%.*]] = alloca i32, align 4 -// CHECK18-NEXT: [[DOTOFFLOAD_BASEPTRS32:%.*]] = alloca [3 x i8*], align 4 -// CHECK18-NEXT: [[DOTOFFLOAD_PTRS33:%.*]] = alloca [3 x i8*], align 4 -// CHECK18-NEXT: [[DOTOFFLOAD_MAPPERS34:%.*]] = alloca [3 x i8*], align 4 -// CHECK18-NEXT: [[DOTOFFLOAD_SIZES35:%.*]] = alloca [3 x i64], align 4 -// CHECK18-NEXT: [[_TMP36:%.*]] = alloca i32, align 4 -// CHECK18-NEXT: [[DOTCAPTURE_EXPR_37:%.*]] = alloca i32, align 4 -// CHECK18-NEXT: [[DOTCAPTURE_EXPR_38:%.*]] = alloca i32, align 4 -// CHECK18-NEXT: [[M_CASTED45:%.*]] = alloca i32, align 4 -// CHECK18-NEXT: [[N_CASTED46:%.*]] = alloca i32, align 4 -// CHECK18-NEXT: [[DOTOFFLOAD_BASEPTRS47:%.*]] = alloca [4 x i8*], align 4 -// CHECK18-NEXT: [[DOTOFFLOAD_PTRS48:%.*]] = alloca [4 x i8*], align 4 -// CHECK18-NEXT: 
[[DOTOFFLOAD_MAPPERS49:%.*]] = alloca [4 x i8*], align 4 -// CHECK18-NEXT: [[DOTOFFLOAD_SIZES50:%.*]] = alloca [4 x i64], align 4 -// CHECK18-NEXT: [[_TMP51:%.*]] = alloca i32, align 4 -// CHECK18-NEXT: [[DOTCAPTURE_EXPR_52:%.*]] = alloca i32, align 4 -// CHECK18-NEXT: [[DOTCAPTURE_EXPR_53:%.*]] = alloca i32, align 4 +// CHECK18-NEXT: [[N_CASTED16:%.*]] = alloca i32, align 4 +// CHECK18-NEXT: [[DOTOFFLOAD_BASEPTRS17:%.*]] = alloca [4 x i8*], align 4 +// CHECK18-NEXT: [[DOTOFFLOAD_PTRS18:%.*]] = alloca [4 x i8*], align 4 +// CHECK18-NEXT: [[DOTOFFLOAD_MAPPERS19:%.*]] = alloca [4 x i8*], align 4 +// CHECK18-NEXT: [[_TMP20:%.*]] = alloca i32, align 4 +// CHECK18-NEXT: [[DOTCAPTURE_EXPR_21:%.*]] = alloca i32, align 4 +// CHECK18-NEXT: [[DOTCAPTURE_EXPR_22:%.*]] = alloca i32, align 4 +// CHECK18-NEXT: [[N_CASTED29:%.*]] = alloca i32, align 4 +// CHECK18-NEXT: [[DOTOFFLOAD_BASEPTRS30:%.*]] = alloca [3 x i8*], align 4 +// CHECK18-NEXT: [[DOTOFFLOAD_PTRS31:%.*]] = alloca [3 x i8*], align 4 +// CHECK18-NEXT: [[DOTOFFLOAD_MAPPERS32:%.*]] = alloca [3 x i8*], align 4 +// CHECK18-NEXT: [[_TMP33:%.*]] = alloca i32, align 4 +// CHECK18-NEXT: [[DOTCAPTURE_EXPR_34:%.*]] = alloca i32, align 4 +// CHECK18-NEXT: [[DOTCAPTURE_EXPR_35:%.*]] = alloca i32, align 4 +// CHECK18-NEXT: [[M_CASTED42:%.*]] = alloca i32, align 4 +// CHECK18-NEXT: [[N_CASTED43:%.*]] = alloca i32, align 4 +// CHECK18-NEXT: [[DOTOFFLOAD_BASEPTRS44:%.*]] = alloca [4 x i8*], align 4 +// CHECK18-NEXT: [[DOTOFFLOAD_PTRS45:%.*]] = alloca [4 x i8*], align 4 +// CHECK18-NEXT: [[DOTOFFLOAD_MAPPERS46:%.*]] = alloca [4 x i8*], align 4 +// CHECK18-NEXT: [[_TMP47:%.*]] = alloca i32, align 4 +// CHECK18-NEXT: [[DOTCAPTURE_EXPR_48:%.*]] = alloca i32, align 4 +// CHECK18-NEXT: [[DOTCAPTURE_EXPR_49:%.*]] = alloca i32, align 4 // CHECK18-NEXT: store i32 0, i32* [[RETVAL]], align 4 // CHECK18-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 // CHECK18-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 4 @@ -22038,308 +21838,274 @@ // CHECK18-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK18-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i32* // CHECK18-NEXT: store i32 [[TMP3]], i32* [[TMP9]], align 4 -// CHECK18-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK18-NEXT: store i64 4, i64* [[TMP10]], align 4 -// CHECK18-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK18-NEXT: store i8* null, i8** [[TMP11]], align 4 -// CHECK18-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK18-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32* -// CHECK18-NEXT: store i32 [[TMP0]], i32* [[TMP13]], align 4 -// CHECK18-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK18-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32* -// CHECK18-NEXT: store i32 [[TMP0]], i32* [[TMP15]], align 4 -// CHECK18-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK18-NEXT: store i64 4, i64* [[TMP16]], align 4 -// CHECK18-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK18-NEXT: store i8* null, i8** [[TMP17]], align 4 -// CHECK18-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK18-NEXT: 
[[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK18-NEXT: store i8* null, i8** [[TMP10]], align 4 +// CHECK18-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK18-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i32* +// CHECK18-NEXT: store i32 [[TMP0]], i32* [[TMP12]], align 4 +// CHECK18-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK18-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i32* +// CHECK18-NEXT: store i32 [[TMP0]], i32* [[TMP14]], align 4 +// CHECK18-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK18-NEXT: store i8* null, i8** [[TMP15]], align 4 +// CHECK18-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK18-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** +// CHECK18-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 4 +// CHECK18-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK18-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** // CHECK18-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 4 -// CHECK18-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK18-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** -// CHECK18-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 4 -// CHECK18-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK18-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 4 -// CHECK18-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK18-NEXT: store i8* null, i8** [[TMP23]], align 4 -// CHECK18-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4 -// CHECK18-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK18-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK18-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 +// CHECK18-NEXT: store i64 [[TMP5]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 2), align 4 +// CHECK18-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK18-NEXT: store i8* null, i8** [[TMP20]], align 4 +// CHECK18-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP23:%.*]] = load i32, i32* [[N]], align 4 +// CHECK18-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK18-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK18-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK18-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK18-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK18-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK18-NEXT: [[TMP29:%.*]] = load i32, i32* 
[[DOTCAPTURE_EXPR_1]], align 4 -// CHECK18-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1 -// CHECK18-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 -// CHECK18-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP30]]) -// CHECK18-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l154.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK18-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 -// CHECK18-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK18-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK18-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], 1 +// CHECK18-NEXT: [[TMP26:%.*]] = zext i32 [[ADD]] to i64 +// CHECK18-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP26]]) +// CHECK18-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l154.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK18-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK18-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK18: omp_offload.failed: // CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l154(i32 [[TMP3]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK18-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK18: omp_offload.cont: -// CHECK18-NEXT: [[TMP33:%.*]] = load i32, i32* [[N]], align 4 -// CHECK18-NEXT: store i32 [[TMP33]], i32* [[N_CASTED3]], align 4 -// CHECK18-NEXT: [[TMP34:%.*]] = load i32, i32* [[N_CASTED3]], align 4 -// CHECK18-NEXT: [[TMP35:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK18-NEXT: [[TMP36:%.*]] = sext i32 [[TMP35]] to i64 -// CHECK18-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i32* -// CHECK18-NEXT: store i32 [[TMP34]], i32* [[TMP38]], align 4 -// CHECK18-NEXT: [[TMP39:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i32* -// CHECK18-NEXT: store i32 [[TMP34]], i32* [[TMP40]], align 4 -// CHECK18-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 0 -// CHECK18-NEXT: store i64 4, i64* [[TMP41]], align 4 -// CHECK18-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP29:%.*]] = load i32, i32* [[N]], align 4 +// CHECK18-NEXT: store i32 [[TMP29]], i32* [[N_CASTED3]], align 4 +// CHECK18-NEXT: [[TMP30:%.*]] = load i32, i32* [[N_CASTED3]], align 4 +// CHECK18-NEXT: [[TMP31:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK18-NEXT: [[TMP32:%.*]] = sext i32 [[TMP31]] to i64 +// CHECK18-NEXT: [[TMP33:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i32* +// CHECK18-NEXT: store i32 
[[TMP30]], i32* [[TMP34]], align 4 +// CHECK18-NEXT: [[TMP35:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP36:%.*]] = bitcast i8** [[TMP35]] to i32* +// CHECK18-NEXT: store i32 [[TMP30]], i32* [[TMP36]], align 4 +// CHECK18-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0 +// CHECK18-NEXT: store i8* null, i8** [[TMP37]], align 4 +// CHECK18-NEXT: [[TMP38:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1 +// CHECK18-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i32* +// CHECK18-NEXT: store i32 [[TMP0]], i32* [[TMP39]], align 4 +// CHECK18-NEXT: [[TMP40:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 1 +// CHECK18-NEXT: [[TMP41:%.*]] = bitcast i8** [[TMP40]] to i32* +// CHECK18-NEXT: store i32 [[TMP0]], i32* [[TMP41]], align 4 +// CHECK18-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1 // CHECK18-NEXT: store i8* null, i8** [[TMP42]], align 4 -// CHECK18-NEXT: [[TMP43:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1 -// CHECK18-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32* -// CHECK18-NEXT: store i32 [[TMP0]], i32* [[TMP44]], align 4 -// CHECK18-NEXT: [[TMP45:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 1 -// CHECK18-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32* -// CHECK18-NEXT: store i32 [[TMP0]], i32* [[TMP46]], align 4 -// CHECK18-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 1 -// CHECK18-NEXT: store i64 4, i64* [[TMP47]], align 4 -// CHECK18-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1 -// CHECK18-NEXT: store i8* null, i8** [[TMP48]], align 4 -// CHECK18-NEXT: [[TMP49:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2 -// CHECK18-NEXT: [[TMP50:%.*]] = bitcast i8** [[TMP49]] to i32** -// CHECK18-NEXT: store i32* [[VLA]], i32** [[TMP50]], align 4 -// CHECK18-NEXT: [[TMP51:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 2 -// CHECK18-NEXT: [[TMP52:%.*]] = bitcast i8** [[TMP51]] to i32** -// CHECK18-NEXT: store i32* [[VLA]], i32** [[TMP52]], align 4 -// CHECK18-NEXT: [[TMP53:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 2 -// CHECK18-NEXT: store i64 [[TMP36]], i64* [[TMP53]], align 4 -// CHECK18-NEXT: [[TMP54:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 2 -// CHECK18-NEXT: store i8* null, i8** [[TMP54]], align 4 -// CHECK18-NEXT: [[TMP55:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP56:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP57:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP43:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2 +// CHECK18-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32** +// CHECK18-NEXT: store i32* [[VLA]], i32** [[TMP44]], align 4 +// CHECK18-NEXT: [[TMP45:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 2 +// CHECK18-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to 
i32** +// CHECK18-NEXT: store i32* [[VLA]], i32** [[TMP46]], align 4 +// CHECK18-NEXT: store i64 [[TMP32]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 2), align 4 +// CHECK18-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 2 +// CHECK18-NEXT: store i8* null, i8** [[TMP47]], align 4 +// CHECK18-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP49:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP50:%.*]] = load i32, i32* [[N]], align 4 +// CHECK18-NEXT: store i32 [[TMP50]], i32* [[DOTCAPTURE_EXPR_8]], align 4 +// CHECK18-NEXT: [[TMP51:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_8]], align 4 +// CHECK18-NEXT: [[SUB10:%.*]] = sub nsw i32 [[TMP51]], 0 +// CHECK18-NEXT: [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1 +// CHECK18-NEXT: [[SUB12:%.*]] = sub nsw i32 [[DIV11]], 1 +// CHECK18-NEXT: store i32 [[SUB12]], i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK18-NEXT: [[TMP52:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK18-NEXT: [[ADD13:%.*]] = add nsw i32 [[TMP52]], 1 +// CHECK18-NEXT: [[TMP53:%.*]] = zext i32 [[ADD13]] to i64 +// CHECK18-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP53]]) +// CHECK18-NEXT: [[TMP54:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159.region_id, i32 3, i8** [[TMP48]], i8** [[TMP49]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK18-NEXT: [[TMP55:%.*]] = icmp ne i32 [[TMP54]], 0 +// CHECK18-NEXT: br i1 [[TMP55]], label [[OMP_OFFLOAD_FAILED14:%.*]], label [[OMP_OFFLOAD_CONT15:%.*]] +// CHECK18: omp_offload.failed14: +// CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159(i32 [[TMP30]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] +// CHECK18-NEXT: br label [[OMP_OFFLOAD_CONT15]] +// CHECK18: omp_offload.cont15: +// CHECK18-NEXT: [[TMP56:%.*]] = load i32, i32* [[M]], align 4 +// CHECK18-NEXT: store i32 [[TMP56]], i32* [[M_CASTED]], align 4 +// CHECK18-NEXT: [[TMP57:%.*]] = load i32, i32* [[M_CASTED]], align 4 // CHECK18-NEXT: [[TMP58:%.*]] = load i32, i32* [[N]], align 4 -// CHECK18-NEXT: store i32 [[TMP58]], i32* [[DOTCAPTURE_EXPR_9]], align 4 -// CHECK18-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 -// CHECK18-NEXT: [[SUB11:%.*]] = sub nsw i32 [[TMP59]], 0 -// CHECK18-NEXT: [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1 -// CHECK18-NEXT: [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1 -// CHECK18-NEXT: store i32 [[SUB13]], i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK18-NEXT: [[TMP60:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK18-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP60]], 1 -// CHECK18-NEXT: [[TMP61:%.*]] = zext i32 [[ADD14]] to i64 -// CHECK18-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP61]]) -// CHECK18-NEXT: [[TMP62:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159.region_id, i32 3, i8** [[TMP55]], i8** [[TMP56]], i64* [[TMP57]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** 
null, i32 0, i32 0) -// CHECK18-NEXT: [[TMP63:%.*]] = icmp ne i32 [[TMP62]], 0 -// CHECK18-NEXT: br i1 [[TMP63]], label [[OMP_OFFLOAD_FAILED15:%.*]], label [[OMP_OFFLOAD_CONT16:%.*]] -// CHECK18: omp_offload.failed15: -// CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159(i32 [[TMP34]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] -// CHECK18-NEXT: br label [[OMP_OFFLOAD_CONT16]] -// CHECK18: omp_offload.cont16: -// CHECK18-NEXT: [[TMP64:%.*]] = load i32, i32* [[M]], align 4 -// CHECK18-NEXT: store i32 [[TMP64]], i32* [[M_CASTED]], align 4 -// CHECK18-NEXT: [[TMP65:%.*]] = load i32, i32* [[M_CASTED]], align 4 -// CHECK18-NEXT: [[TMP66:%.*]] = load i32, i32* [[N]], align 4 -// CHECK18-NEXT: store i32 [[TMP66]], i32* [[N_CASTED17]], align 4 -// CHECK18-NEXT: [[TMP67:%.*]] = load i32, i32* [[N_CASTED17]], align 4 -// CHECK18-NEXT: [[TMP68:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK18-NEXT: [[TMP69:%.*]] = sext i32 [[TMP68]] to i64 -// CHECK18-NEXT: [[TMP70:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP71:%.*]] = bitcast i8** [[TMP70]] to i32* -// CHECK18-NEXT: store i32 [[TMP65]], i32* [[TMP71]], align 4 -// CHECK18-NEXT: [[TMP72:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 0 +// CHECK18-NEXT: store i32 [[TMP58]], i32* [[N_CASTED16]], align 4 +// CHECK18-NEXT: [[TMP59:%.*]] = load i32, i32* [[N_CASTED16]], align 4 +// CHECK18-NEXT: [[TMP60:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK18-NEXT: [[TMP61:%.*]] = sext i32 [[TMP60]] to i64 +// CHECK18-NEXT: [[TMP62:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP63:%.*]] = bitcast i8** [[TMP62]] to i32* +// CHECK18-NEXT: store i32 [[TMP57]], i32* [[TMP63]], align 4 +// CHECK18-NEXT: [[TMP64:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP65:%.*]] = bitcast i8** [[TMP64]] to i32* +// CHECK18-NEXT: store i32 [[TMP57]], i32* [[TMP65]], align 4 +// CHECK18-NEXT: [[TMP66:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 0 +// CHECK18-NEXT: store i8* null, i8** [[TMP66]], align 4 +// CHECK18-NEXT: [[TMP67:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 1 +// CHECK18-NEXT: [[TMP68:%.*]] = bitcast i8** [[TMP67]] to i32* +// CHECK18-NEXT: store i32 [[TMP59]], i32* [[TMP68]], align 4 +// CHECK18-NEXT: [[TMP69:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 1 +// CHECK18-NEXT: [[TMP70:%.*]] = bitcast i8** [[TMP69]] to i32* +// CHECK18-NEXT: store i32 [[TMP59]], i32* [[TMP70]], align 4 +// CHECK18-NEXT: [[TMP71:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 1 +// CHECK18-NEXT: store i8* null, i8** [[TMP71]], align 4 +// CHECK18-NEXT: [[TMP72:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 2 // CHECK18-NEXT: [[TMP73:%.*]] = bitcast i8** [[TMP72]] to i32* -// CHECK18-NEXT: store i32 [[TMP65]], i32* [[TMP73]], align 4 -// CHECK18-NEXT: [[TMP74:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES21]], i32 0, i32 0 -// CHECK18-NEXT: store i64 4, i64* [[TMP74]], align 4 -// CHECK18-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 0 -// CHECK18-NEXT: store i8* null, i8** [[TMP75]], align 4 -// CHECK18-NEXT: [[TMP76:%.*]] = getelementptr 
inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 1 -// CHECK18-NEXT: [[TMP77:%.*]] = bitcast i8** [[TMP76]] to i32* -// CHECK18-NEXT: store i32 [[TMP67]], i32* [[TMP77]], align 4 -// CHECK18-NEXT: [[TMP78:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 1 -// CHECK18-NEXT: [[TMP79:%.*]] = bitcast i8** [[TMP78]] to i32* -// CHECK18-NEXT: store i32 [[TMP67]], i32* [[TMP79]], align 4 -// CHECK18-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES21]], i32 0, i32 1 -// CHECK18-NEXT: store i64 4, i64* [[TMP80]], align 4 -// CHECK18-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 1 +// CHECK18-NEXT: store i32 [[TMP0]], i32* [[TMP73]], align 4 +// CHECK18-NEXT: [[TMP74:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 2 +// CHECK18-NEXT: [[TMP75:%.*]] = bitcast i8** [[TMP74]] to i32* +// CHECK18-NEXT: store i32 [[TMP0]], i32* [[TMP75]], align 4 +// CHECK18-NEXT: [[TMP76:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 2 +// CHECK18-NEXT: store i8* null, i8** [[TMP76]], align 4 +// CHECK18-NEXT: [[TMP77:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 3 +// CHECK18-NEXT: [[TMP78:%.*]] = bitcast i8** [[TMP77]] to i32** +// CHECK18-NEXT: store i32* [[VLA]], i32** [[TMP78]], align 4 +// CHECK18-NEXT: [[TMP79:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 3 +// CHECK18-NEXT: [[TMP80:%.*]] = bitcast i8** [[TMP79]] to i32** +// CHECK18-NEXT: store i32* [[VLA]], i32** [[TMP80]], align 4 +// CHECK18-NEXT: store i64 [[TMP61]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 3), align 4 +// CHECK18-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 3 // CHECK18-NEXT: store i8* null, i8** [[TMP81]], align 4 -// CHECK18-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 2 -// CHECK18-NEXT: [[TMP83:%.*]] = bitcast i8** [[TMP82]] to i32* -// CHECK18-NEXT: store i32 [[TMP0]], i32* [[TMP83]], align 4 -// CHECK18-NEXT: [[TMP84:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 2 -// CHECK18-NEXT: [[TMP85:%.*]] = bitcast i8** [[TMP84]] to i32* -// CHECK18-NEXT: store i32 [[TMP0]], i32* [[TMP85]], align 4 -// CHECK18-NEXT: [[TMP86:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES21]], i32 0, i32 2 -// CHECK18-NEXT: store i64 4, i64* [[TMP86]], align 4 -// CHECK18-NEXT: [[TMP87:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 2 -// CHECK18-NEXT: store i8* null, i8** [[TMP87]], align 4 -// CHECK18-NEXT: [[TMP88:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 3 -// CHECK18-NEXT: [[TMP89:%.*]] = bitcast i8** [[TMP88]] to i32** -// CHECK18-NEXT: store i32* [[VLA]], i32** [[TMP89]], align 4 -// CHECK18-NEXT: [[TMP90:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 3 -// CHECK18-NEXT: [[TMP91:%.*]] = bitcast i8** [[TMP90]] to i32** -// CHECK18-NEXT: store i32* [[VLA]], i32** [[TMP91]], align 4 -// CHECK18-NEXT: [[TMP92:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES21]], i32 0, i32 3 -// CHECK18-NEXT: store i64 [[TMP69]], i64* [[TMP92]], align 4 -// CHECK18-NEXT: [[TMP93:%.*]] = 
getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 3 -// CHECK18-NEXT: store i8* null, i8** [[TMP93]], align 4 -// CHECK18-NEXT: [[TMP94:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP95:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP96:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES21]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP97:%.*]] = load i32, i32* [[N]], align 4 -// CHECK18-NEXT: store i32 [[TMP97]], i32* [[DOTCAPTURE_EXPR_23]], align 4 -// CHECK18-NEXT: [[TMP98:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_23]], align 4 -// CHECK18-NEXT: [[SUB25:%.*]] = sub nsw i32 [[TMP98]], 0 -// CHECK18-NEXT: [[DIV26:%.*]] = sdiv i32 [[SUB25]], 1 -// CHECK18-NEXT: [[SUB27:%.*]] = sub nsw i32 [[DIV26]], 1 -// CHECK18-NEXT: store i32 [[SUB27]], i32* [[DOTCAPTURE_EXPR_24]], align 4 -// CHECK18-NEXT: [[TMP99:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_24]], align 4 -// CHECK18-NEXT: [[ADD28:%.*]] = add nsw i32 [[TMP99]], 1 -// CHECK18-NEXT: [[TMP100:%.*]] = zext i32 [[ADD28]] to i64 -// CHECK18-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP100]]) -// CHECK18-NEXT: [[TMP101:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l164.region_id, i32 4, i8** [[TMP94]], i8** [[TMP95]], i64* [[TMP96]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.7, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK18-NEXT: [[TMP102:%.*]] = icmp ne i32 [[TMP101]], 0 -// CHECK18-NEXT: br i1 [[TMP102]], label [[OMP_OFFLOAD_FAILED29:%.*]], label [[OMP_OFFLOAD_CONT30:%.*]] -// CHECK18: omp_offload.failed29: -// CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l164(i32 [[TMP65]], i32 [[TMP67]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] -// CHECK18-NEXT: br label [[OMP_OFFLOAD_CONT30]] -// CHECK18: omp_offload.cont30: -// CHECK18-NEXT: [[TMP103:%.*]] = load i32, i32* [[N]], align 4 -// CHECK18-NEXT: store i32 [[TMP103]], i32* [[N_CASTED31]], align 4 -// CHECK18-NEXT: [[TMP104:%.*]] = load i32, i32* [[N_CASTED31]], align 4 -// CHECK18-NEXT: [[TMP105:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK18-NEXT: [[TMP106:%.*]] = sext i32 [[TMP105]] to i64 -// CHECK18-NEXT: [[TMP107:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS32]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP108:%.*]] = bitcast i8** [[TMP107]] to i32* -// CHECK18-NEXT: store i32 [[TMP104]], i32* [[TMP108]], align 4 -// CHECK18-NEXT: [[TMP109:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS33]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP110:%.*]] = bitcast i8** [[TMP109]] to i32* -// CHECK18-NEXT: store i32 [[TMP104]], i32* [[TMP110]], align 4 -// CHECK18-NEXT: [[TMP111:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES35]], i32 0, i32 0 -// CHECK18-NEXT: store i64 4, i64* [[TMP111]], align 4 -// CHECK18-NEXT: [[TMP112:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS34]], i32 0, i32 0 -// CHECK18-NEXT: store i8* null, i8** [[TMP112]], align 4 -// CHECK18-NEXT: [[TMP113:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS32]], i32 0, i32 1 -// CHECK18-NEXT: [[TMP114:%.*]] = bitcast i8** [[TMP113]] to i32* -// CHECK18-NEXT: store i32 [[TMP0]], i32* [[TMP114]], align 4 -// CHECK18-NEXT: [[TMP115:%.*]] = getelementptr 
inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS33]], i32 0, i32 1 -// CHECK18-NEXT: [[TMP116:%.*]] = bitcast i8** [[TMP115]] to i32* -// CHECK18-NEXT: store i32 [[TMP0]], i32* [[TMP116]], align 4 -// CHECK18-NEXT: [[TMP117:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES35]], i32 0, i32 1 -// CHECK18-NEXT: store i64 4, i64* [[TMP117]], align 4 -// CHECK18-NEXT: [[TMP118:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS34]], i32 0, i32 1 -// CHECK18-NEXT: store i8* null, i8** [[TMP118]], align 4 -// CHECK18-NEXT: [[TMP119:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS32]], i32 0, i32 2 -// CHECK18-NEXT: [[TMP120:%.*]] = bitcast i8** [[TMP119]] to i32** -// CHECK18-NEXT: store i32* [[VLA]], i32** [[TMP120]], align 4 -// CHECK18-NEXT: [[TMP121:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS33]], i32 0, i32 2 -// CHECK18-NEXT: [[TMP122:%.*]] = bitcast i8** [[TMP121]] to i32** -// CHECK18-NEXT: store i32* [[VLA]], i32** [[TMP122]], align 4 -// CHECK18-NEXT: [[TMP123:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES35]], i32 0, i32 2 -// CHECK18-NEXT: store i64 [[TMP106]], i64* [[TMP123]], align 4 -// CHECK18-NEXT: [[TMP124:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS34]], i32 0, i32 2 -// CHECK18-NEXT: store i8* null, i8** [[TMP124]], align 4 -// CHECK18-NEXT: [[TMP125:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS32]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP126:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS33]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP127:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES35]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP128:%.*]] = load i32, i32* [[N]], align 4 -// CHECK18-NEXT: store i32 [[TMP128]], i32* [[DOTCAPTURE_EXPR_37]], align 4 -// CHECK18-NEXT: [[TMP129:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_37]], align 4 -// CHECK18-NEXT: [[SUB39:%.*]] = sub nsw i32 [[TMP129]], 0 -// CHECK18-NEXT: [[DIV40:%.*]] = sdiv i32 [[SUB39]], 1 -// CHECK18-NEXT: [[SUB41:%.*]] = sub nsw i32 [[DIV40]], 1 -// CHECK18-NEXT: store i32 [[SUB41]], i32* [[DOTCAPTURE_EXPR_38]], align 4 -// CHECK18-NEXT: [[TMP130:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_38]], align 4 -// CHECK18-NEXT: [[ADD42:%.*]] = add nsw i32 [[TMP130]], 1 -// CHECK18-NEXT: [[TMP131:%.*]] = zext i32 [[ADD42]] to i64 -// CHECK18-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP131]]) -// CHECK18-NEXT: [[TMP132:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l169.region_id, i32 3, i8** [[TMP125]], i8** [[TMP126]], i64* [[TMP127]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK18-NEXT: [[TMP133:%.*]] = icmp ne i32 [[TMP132]], 0 -// CHECK18-NEXT: br i1 [[TMP133]], label [[OMP_OFFLOAD_FAILED43:%.*]], label [[OMP_OFFLOAD_CONT44:%.*]] -// CHECK18: omp_offload.failed43: -// CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l169(i32 [[TMP104]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] -// CHECK18-NEXT: br label [[OMP_OFFLOAD_CONT44]] -// CHECK18: omp_offload.cont44: -// CHECK18-NEXT: [[TMP134:%.*]] = load i32, i32* [[M]], align 4 -// CHECK18-NEXT: store i32 [[TMP134]], i32* [[M_CASTED45]], align 4 -// CHECK18-NEXT: [[TMP135:%.*]] = load i32, i32* [[M_CASTED45]], align 4 -// 
CHECK18-NEXT: [[TMP136:%.*]] = load i32, i32* [[N]], align 4 -// CHECK18-NEXT: store i32 [[TMP136]], i32* [[N_CASTED46]], align 4 -// CHECK18-NEXT: [[TMP137:%.*]] = load i32, i32* [[N_CASTED46]], align 4 -// CHECK18-NEXT: [[TMP138:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK18-NEXT: [[TMP139:%.*]] = sext i32 [[TMP138]] to i64 -// CHECK18-NEXT: [[TMP140:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS47]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP141:%.*]] = bitcast i8** [[TMP140]] to i32* -// CHECK18-NEXT: store i32 [[TMP135]], i32* [[TMP141]], align 4 -// CHECK18-NEXT: [[TMP142:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS48]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP143:%.*]] = bitcast i8** [[TMP142]] to i32* -// CHECK18-NEXT: store i32 [[TMP135]], i32* [[TMP143]], align 4 -// CHECK18-NEXT: [[TMP144:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES50]], i32 0, i32 0 -// CHECK18-NEXT: store i64 4, i64* [[TMP144]], align 4 -// CHECK18-NEXT: [[TMP145:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS49]], i32 0, i32 0 -// CHECK18-NEXT: store i8* null, i8** [[TMP145]], align 4 -// CHECK18-NEXT: [[TMP146:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS47]], i32 0, i32 1 -// CHECK18-NEXT: [[TMP147:%.*]] = bitcast i8** [[TMP146]] to i32* -// CHECK18-NEXT: store i32 [[TMP137]], i32* [[TMP147]], align 4 -// CHECK18-NEXT: [[TMP148:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS48]], i32 0, i32 1 -// CHECK18-NEXT: [[TMP149:%.*]] = bitcast i8** [[TMP148]] to i32* -// CHECK18-NEXT: store i32 [[TMP137]], i32* [[TMP149]], align 4 -// CHECK18-NEXT: [[TMP150:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES50]], i32 0, i32 1 -// CHECK18-NEXT: store i64 4, i64* [[TMP150]], align 4 -// CHECK18-NEXT: [[TMP151:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS49]], i32 0, i32 1 -// CHECK18-NEXT: store i8* null, i8** [[TMP151]], align 4 -// CHECK18-NEXT: [[TMP152:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS47]], i32 0, i32 2 -// CHECK18-NEXT: [[TMP153:%.*]] = bitcast i8** [[TMP152]] to i32* -// CHECK18-NEXT: store i32 [[TMP0]], i32* [[TMP153]], align 4 -// CHECK18-NEXT: [[TMP154:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS48]], i32 0, i32 2 -// CHECK18-NEXT: [[TMP155:%.*]] = bitcast i8** [[TMP154]] to i32* -// CHECK18-NEXT: store i32 [[TMP0]], i32* [[TMP155]], align 4 -// CHECK18-NEXT: [[TMP156:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES50]], i32 0, i32 2 -// CHECK18-NEXT: store i64 4, i64* [[TMP156]], align 4 -// CHECK18-NEXT: [[TMP157:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS49]], i32 0, i32 2 -// CHECK18-NEXT: store i8* null, i8** [[TMP157]], align 4 -// CHECK18-NEXT: [[TMP158:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS47]], i32 0, i32 3 -// CHECK18-NEXT: [[TMP159:%.*]] = bitcast i8** [[TMP158]] to i32** -// CHECK18-NEXT: store i32* [[VLA]], i32** [[TMP159]], align 4 -// CHECK18-NEXT: [[TMP160:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS48]], i32 0, i32 3 -// CHECK18-NEXT: [[TMP161:%.*]] = bitcast i8** [[TMP160]] to i32** -// CHECK18-NEXT: store i32* [[VLA]], i32** [[TMP161]], align 4 -// CHECK18-NEXT: [[TMP162:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES50]], i32 0, i32 3 -// CHECK18-NEXT: store i64 [[TMP139]], i64* [[TMP162]], align 
4 -// CHECK18-NEXT: [[TMP163:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS49]], i32 0, i32 3 -// CHECK18-NEXT: store i8* null, i8** [[TMP163]], align 4 -// CHECK18-NEXT: [[TMP164:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS47]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP165:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS48]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP166:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES50]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP167:%.*]] = load i32, i32* [[N]], align 4 -// CHECK18-NEXT: store i32 [[TMP167]], i32* [[DOTCAPTURE_EXPR_52]], align 4 -// CHECK18-NEXT: [[TMP168:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_52]], align 4 -// CHECK18-NEXT: [[SUB54:%.*]] = sub nsw i32 [[TMP168]], 0 -// CHECK18-NEXT: [[DIV55:%.*]] = sdiv i32 [[SUB54]], 1 -// CHECK18-NEXT: [[SUB56:%.*]] = sub nsw i32 [[DIV55]], 1 -// CHECK18-NEXT: store i32 [[SUB56]], i32* [[DOTCAPTURE_EXPR_53]], align 4 -// CHECK18-NEXT: [[TMP169:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_53]], align 4 -// CHECK18-NEXT: [[ADD57:%.*]] = add nsw i32 [[TMP169]], 1 -// CHECK18-NEXT: [[TMP170:%.*]] = zext i32 [[ADD57]] to i64 -// CHECK18-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP170]]) -// CHECK18-NEXT: [[TMP171:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l174.region_id, i32 4, i8** [[TMP164]], i8** [[TMP165]], i64* [[TMP166]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK18-NEXT: [[TMP172:%.*]] = icmp ne i32 [[TMP171]], 0 -// CHECK18-NEXT: br i1 [[TMP172]], label [[OMP_OFFLOAD_FAILED58:%.*]], label [[OMP_OFFLOAD_CONT59:%.*]] -// CHECK18: omp_offload.failed58: -// CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l174(i32 [[TMP135]], i32 [[TMP137]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] -// CHECK18-NEXT: br label [[OMP_OFFLOAD_CONT59]] -// CHECK18: omp_offload.cont59: -// CHECK18-NEXT: [[TMP173:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 -// CHECK18-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP173]]) +// CHECK18-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP83:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP84:%.*]] = load i32, i32* [[N]], align 4 +// CHECK18-NEXT: store i32 [[TMP84]], i32* [[DOTCAPTURE_EXPR_21]], align 4 +// CHECK18-NEXT: [[TMP85:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_21]], align 4 +// CHECK18-NEXT: [[SUB23:%.*]] = sub nsw i32 [[TMP85]], 0 +// CHECK18-NEXT: [[DIV24:%.*]] = sdiv i32 [[SUB23]], 1 +// CHECK18-NEXT: [[SUB25:%.*]] = sub nsw i32 [[DIV24]], 1 +// CHECK18-NEXT: store i32 [[SUB25]], i32* [[DOTCAPTURE_EXPR_22]], align 4 +// CHECK18-NEXT: [[TMP86:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_22]], align 4 +// CHECK18-NEXT: [[ADD26:%.*]] = add nsw i32 [[TMP86]], 1 +// CHECK18-NEXT: [[TMP87:%.*]] = zext i32 [[ADD26]] to i64 +// CHECK18-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP87]]) +// CHECK18-NEXT: [[TMP88:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l164.region_id, i32 4, i8** [[TMP82]], i8** [[TMP83]], i64* getelementptr inbounds 
([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK18-NEXT: [[TMP89:%.*]] = icmp ne i32 [[TMP88]], 0 +// CHECK18-NEXT: br i1 [[TMP89]], label [[OMP_OFFLOAD_FAILED27:%.*]], label [[OMP_OFFLOAD_CONT28:%.*]] +// CHECK18: omp_offload.failed27: +// CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l164(i32 [[TMP57]], i32 [[TMP59]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] +// CHECK18-NEXT: br label [[OMP_OFFLOAD_CONT28]] +// CHECK18: omp_offload.cont28: +// CHECK18-NEXT: [[TMP90:%.*]] = load i32, i32* [[N]], align 4 +// CHECK18-NEXT: store i32 [[TMP90]], i32* [[N_CASTED29]], align 4 +// CHECK18-NEXT: [[TMP91:%.*]] = load i32, i32* [[N_CASTED29]], align 4 +// CHECK18-NEXT: [[TMP92:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK18-NEXT: [[TMP93:%.*]] = sext i32 [[TMP92]] to i64 +// CHECK18-NEXT: [[TMP94:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS30]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP95:%.*]] = bitcast i8** [[TMP94]] to i32* +// CHECK18-NEXT: store i32 [[TMP91]], i32* [[TMP95]], align 4 +// CHECK18-NEXT: [[TMP96:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS31]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP97:%.*]] = bitcast i8** [[TMP96]] to i32* +// CHECK18-NEXT: store i32 [[TMP91]], i32* [[TMP97]], align 4 +// CHECK18-NEXT: [[TMP98:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS32]], i32 0, i32 0 +// CHECK18-NEXT: store i8* null, i8** [[TMP98]], align 4 +// CHECK18-NEXT: [[TMP99:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS30]], i32 0, i32 1 +// CHECK18-NEXT: [[TMP100:%.*]] = bitcast i8** [[TMP99]] to i32* +// CHECK18-NEXT: store i32 [[TMP0]], i32* [[TMP100]], align 4 +// CHECK18-NEXT: [[TMP101:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS31]], i32 0, i32 1 +// CHECK18-NEXT: [[TMP102:%.*]] = bitcast i8** [[TMP101]] to i32* +// CHECK18-NEXT: store i32 [[TMP0]], i32* [[TMP102]], align 4 +// CHECK18-NEXT: [[TMP103:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS32]], i32 0, i32 1 +// CHECK18-NEXT: store i8* null, i8** [[TMP103]], align 4 +// CHECK18-NEXT: [[TMP104:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS30]], i32 0, i32 2 +// CHECK18-NEXT: [[TMP105:%.*]] = bitcast i8** [[TMP104]] to i32** +// CHECK18-NEXT: store i32* [[VLA]], i32** [[TMP105]], align 4 +// CHECK18-NEXT: [[TMP106:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS31]], i32 0, i32 2 +// CHECK18-NEXT: [[TMP107:%.*]] = bitcast i8** [[TMP106]] to i32** +// CHECK18-NEXT: store i32* [[VLA]], i32** [[TMP107]], align 4 +// CHECK18-NEXT: store i64 [[TMP93]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.12, i32 0, i32 2), align 4 +// CHECK18-NEXT: [[TMP108:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS32]], i32 0, i32 2 +// CHECK18-NEXT: store i8* null, i8** [[TMP108]], align 4 +// CHECK18-NEXT: [[TMP109:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS30]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP110:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS31]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP111:%.*]] = load i32, i32* [[N]], align 4 +// CHECK18-NEXT: store i32 [[TMP111]], i32* [[DOTCAPTURE_EXPR_34]], align 4 +// CHECK18-NEXT: [[TMP112:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_34]], align 4 +// 
CHECK18-NEXT: [[SUB36:%.*]] = sub nsw i32 [[TMP112]], 0 +// CHECK18-NEXT: [[DIV37:%.*]] = sdiv i32 [[SUB36]], 1 +// CHECK18-NEXT: [[SUB38:%.*]] = sub nsw i32 [[DIV37]], 1 +// CHECK18-NEXT: store i32 [[SUB38]], i32* [[DOTCAPTURE_EXPR_35]], align 4 +// CHECK18-NEXT: [[TMP113:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_35]], align 4 +// CHECK18-NEXT: [[ADD39:%.*]] = add nsw i32 [[TMP113]], 1 +// CHECK18-NEXT: [[TMP114:%.*]] = zext i32 [[ADD39]] to i64 +// CHECK18-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP114]]) +// CHECK18-NEXT: [[TMP115:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l169.region_id, i32 3, i8** [[TMP109]], i8** [[TMP110]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK18-NEXT: [[TMP116:%.*]] = icmp ne i32 [[TMP115]], 0 +// CHECK18-NEXT: br i1 [[TMP116]], label [[OMP_OFFLOAD_FAILED40:%.*]], label [[OMP_OFFLOAD_CONT41:%.*]] +// CHECK18: omp_offload.failed40: +// CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l169(i32 [[TMP91]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] +// CHECK18-NEXT: br label [[OMP_OFFLOAD_CONT41]] +// CHECK18: omp_offload.cont41: +// CHECK18-NEXT: [[TMP117:%.*]] = load i32, i32* [[M]], align 4 +// CHECK18-NEXT: store i32 [[TMP117]], i32* [[M_CASTED42]], align 4 +// CHECK18-NEXT: [[TMP118:%.*]] = load i32, i32* [[M_CASTED42]], align 4 +// CHECK18-NEXT: [[TMP119:%.*]] = load i32, i32* [[N]], align 4 +// CHECK18-NEXT: store i32 [[TMP119]], i32* [[N_CASTED43]], align 4 +// CHECK18-NEXT: [[TMP120:%.*]] = load i32, i32* [[N_CASTED43]], align 4 +// CHECK18-NEXT: [[TMP121:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK18-NEXT: [[TMP122:%.*]] = sext i32 [[TMP121]] to i64 +// CHECK18-NEXT: [[TMP123:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP124:%.*]] = bitcast i8** [[TMP123]] to i32* +// CHECK18-NEXT: store i32 [[TMP118]], i32* [[TMP124]], align 4 +// CHECK18-NEXT: [[TMP125:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP126:%.*]] = bitcast i8** [[TMP125]] to i32* +// CHECK18-NEXT: store i32 [[TMP118]], i32* [[TMP126]], align 4 +// CHECK18-NEXT: [[TMP127:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS46]], i32 0, i32 0 +// CHECK18-NEXT: store i8* null, i8** [[TMP127]], align 4 +// CHECK18-NEXT: [[TMP128:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 1 +// CHECK18-NEXT: [[TMP129:%.*]] = bitcast i8** [[TMP128]] to i32* +// CHECK18-NEXT: store i32 [[TMP120]], i32* [[TMP129]], align 4 +// CHECK18-NEXT: [[TMP130:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 1 +// CHECK18-NEXT: [[TMP131:%.*]] = bitcast i8** [[TMP130]] to i32* +// CHECK18-NEXT: store i32 [[TMP120]], i32* [[TMP131]], align 4 +// CHECK18-NEXT: [[TMP132:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS46]], i32 0, i32 1 +// CHECK18-NEXT: store i8* null, i8** [[TMP132]], align 4 +// CHECK18-NEXT: [[TMP133:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 2 +// CHECK18-NEXT: [[TMP134:%.*]] = bitcast i8** [[TMP133]] to i32* +// CHECK18-NEXT: store i32 [[TMP0]], i32* [[TMP134]], align 
4 +// CHECK18-NEXT: [[TMP135:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 2 +// CHECK18-NEXT: [[TMP136:%.*]] = bitcast i8** [[TMP135]] to i32* +// CHECK18-NEXT: store i32 [[TMP0]], i32* [[TMP136]], align 4 +// CHECK18-NEXT: [[TMP137:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS46]], i32 0, i32 2 +// CHECK18-NEXT: store i8* null, i8** [[TMP137]], align 4 +// CHECK18-NEXT: [[TMP138:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 3 +// CHECK18-NEXT: [[TMP139:%.*]] = bitcast i8** [[TMP138]] to i32** +// CHECK18-NEXT: store i32* [[VLA]], i32** [[TMP139]], align 4 +// CHECK18-NEXT: [[TMP140:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 3 +// CHECK18-NEXT: [[TMP141:%.*]] = bitcast i8** [[TMP140]] to i32** +// CHECK18-NEXT: store i32* [[VLA]], i32** [[TMP141]], align 4 +// CHECK18-NEXT: store i64 [[TMP122]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.16, i32 0, i32 3), align 4 +// CHECK18-NEXT: [[TMP142:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS46]], i32 0, i32 3 +// CHECK18-NEXT: store i8* null, i8** [[TMP142]], align 4 +// CHECK18-NEXT: [[TMP143:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP144:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP145:%.*]] = load i32, i32* [[N]], align 4 +// CHECK18-NEXT: store i32 [[TMP145]], i32* [[DOTCAPTURE_EXPR_48]], align 4 +// CHECK18-NEXT: [[TMP146:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_48]], align 4 +// CHECK18-NEXT: [[SUB50:%.*]] = sub nsw i32 [[TMP146]], 0 +// CHECK18-NEXT: [[DIV51:%.*]] = sdiv i32 [[SUB50]], 1 +// CHECK18-NEXT: [[SUB52:%.*]] = sub nsw i32 [[DIV51]], 1 +// CHECK18-NEXT: store i32 [[SUB52]], i32* [[DOTCAPTURE_EXPR_49]], align 4 +// CHECK18-NEXT: [[TMP147:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_49]], align 4 +// CHECK18-NEXT: [[ADD53:%.*]] = add nsw i32 [[TMP147]], 1 +// CHECK18-NEXT: [[TMP148:%.*]] = zext i32 [[ADD53]] to i64 +// CHECK18-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP148]]) +// CHECK18-NEXT: [[TMP149:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l174.region_id, i32 4, i8** [[TMP143]], i8** [[TMP144]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.16, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK18-NEXT: [[TMP150:%.*]] = icmp ne i32 [[TMP149]], 0 +// CHECK18-NEXT: br i1 [[TMP150]], label [[OMP_OFFLOAD_FAILED54:%.*]], label [[OMP_OFFLOAD_CONT55:%.*]] +// CHECK18: omp_offload.failed54: +// CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l174(i32 [[TMP118]], i32 [[TMP120]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] +// CHECK18-NEXT: br label [[OMP_OFFLOAD_CONT55]] +// CHECK18: omp_offload.cont55: +// CHECK18-NEXT: [[TMP151:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 +// CHECK18-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP151]]) // CHECK18-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK18-NEXT: [[TMP174:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK18-NEXT: call void @llvm.stackrestore(i8* [[TMP174]]) -// CHECK18-NEXT: [[TMP175:%.*]] = load i32, i32* 
[[RETVAL]], align 4 -// CHECK18-NEXT: ret i32 [[TMP175]] +// CHECK18-NEXT: [[TMP152:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK18-NEXT: call void @llvm.stackrestore(i8* [[TMP152]]) +// CHECK18-NEXT: [[TMP153:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK18-NEXT: ret i32 [[TMP153]] // // // CHECK18-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l154 @@ -22822,11 +22588,11 @@ // CHECK18-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 // CHECK18-NEXT: store i32 [[TMP3]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK18-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32, i32*, i32)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP4]]) +// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32, i32*, i32)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP4]]) // CHECK18-NEXT: ret void // // -// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..5 +// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..6 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK18-NEXT: entry: // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -22904,7 +22670,7 @@ // CHECK18-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4, !llvm.access.group !29 // CHECK18-NEXT: store i32 [[TMP19]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !29 // CHECK18-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !29 -// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32, i32*, i32)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], i32 [[TMP1]], i32* [[TMP2]], i32 [[TMP20]]), !llvm.access.group !29 +// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32, i32*, i32)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], i32 [[TMP1]], i32* [[TMP2]], i32 [[TMP20]]), !llvm.access.group !29 // CHECK18-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK18: omp.inner.for.inc: // CHECK18-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29 @@ -22958,7 +22724,7 @@ // CHECK18-NEXT: ret void // // -// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..6 +// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..7 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK18-NEXT: entry: // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -23085,11 +22851,11 @@ // CHECK18-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4 // CHECK18-NEXT: [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4 // CHECK18-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4 -// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32, i32*)* @.omp_outlined..8 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32 [[TMP0]], i32* [[TMP1]]) +// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32, i32*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32 [[TMP0]], i32* [[TMP1]]) // CHECK18-NEXT: ret void // // -// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..8 +// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..10 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] { // CHECK18-NEXT: entry: // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -23159,7 +22925,7 @@ // CHECK18: omp.inner.for.body: // CHECK18-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !35 // CHECK18-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !35 -// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32, i32*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), i32 [[TMP16]], i32 [[TMP17]], i32* [[TMP0]], i32 [[TMP1]], i32* [[TMP2]]), !llvm.access.group !35 +// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32, i32*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP16]], i32 [[TMP17]], i32* [[TMP0]], i32 [[TMP1]], i32* [[TMP2]]), !llvm.access.group !35 // CHECK18-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK18: omp.inner.for.inc: // CHECK18-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35 @@ -23190,7 +22956,7 @@ // CHECK18-NEXT: ret void // // -// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..9 +// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..11 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] { // CHECK18-NEXT: entry: // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -23320,11 +23086,11 @@ // CHECK18-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 // CHECK18-NEXT: store i32 [[TMP3]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK18-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32, i32*, i32)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP4]]) +// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32, i32*, i32)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP4]]) // CHECK18-NEXT: ret void // // -// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..14 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK18-NEXT: entry: // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -23400,7 +23166,7 @@ // CHECK18-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4, !llvm.access.group !41 // CHECK18-NEXT: store i32 [[TMP18]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !41 // CHECK18-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !41 -// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32, i32*, i32)* @.omp_outlined..12 to void (i32*, i32*, ...)*), i32 [[TMP16]], i32 [[TMP17]], i32* [[TMP0]], i32 [[TMP1]], i32* [[TMP2]], i32 [[TMP19]]), !llvm.access.group !41 +// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32, i32*, i32)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i32 [[TMP16]], i32 [[TMP17]], i32* [[TMP0]], i32 [[TMP1]], i32* [[TMP2]], i32 [[TMP19]]), !llvm.access.group !41 // CHECK18-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK18: omp.inner.for.inc: // CHECK18-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !41 @@ -23431,7 +23197,7 @@ // CHECK18-NEXT: ret void // // -// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..12 +// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..15 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK18-NEXT: entry: // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -23585,7 +23351,7 @@ // CHECK18-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK18-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK18-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK18-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l122.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK18-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l122.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.20, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.21, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK18-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK18-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK18: omp_offload.failed: @@ -23603,7 +23369,7 @@ // CHECK18-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0 // CHECK18-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0 // CHECK18-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK18-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l127.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.19, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.20, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK18-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l127.region_id, i32 1, i8** [[TMP14]], i8** 
[[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.24, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.25, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK18-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 // CHECK18-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]] // CHECK18: omp_offload.failed5: @@ -23632,7 +23398,7 @@ // CHECK18-NEXT: [[TMP30:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0 // CHECK18-NEXT: [[TMP31:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0 // CHECK18-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK18-NEXT: [[TMP32:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l132.region_id, i32 2, i8** [[TMP30]], i8** [[TMP31]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.23, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.24, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK18-NEXT: [[TMP32:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l132.region_id, i32 2, i8** [[TMP30]], i8** [[TMP31]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.28, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.29, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK18-NEXT: [[TMP33:%.*]] = icmp ne i32 [[TMP32]], 0 // CHECK18-NEXT: br i1 [[TMP33]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]] // CHECK18: omp_offload.failed11: @@ -23650,7 +23416,7 @@ // CHECK18-NEXT: [[TMP39:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 0 // CHECK18-NEXT: [[TMP40:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 0 // CHECK18-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK18-NEXT: [[TMP41:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l137.region_id, i32 1, i8** [[TMP39]], i8** [[TMP40]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.27, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.28, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK18-NEXT: [[TMP41:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l137.region_id, i32 1, i8** [[TMP39]], i8** [[TMP40]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.32, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.33, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK18-NEXT: [[TMP42:%.*]] = icmp ne i32 [[TMP41]], 0 // CHECK18-NEXT: br i1 [[TMP42]], label [[OMP_OFFLOAD_FAILED17:%.*]], label [[OMP_OFFLOAD_CONT18:%.*]] // CHECK18: omp_offload.failed17: @@ -23679,7 +23445,7 @@ // CHECK18-NEXT: [[TMP55:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0 // CHECK18-NEXT: [[TMP56:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0 // 
CHECK18-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK18-NEXT: [[TMP57:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l142.region_id, i32 2, i8** [[TMP55]], i8** [[TMP56]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.31, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.32, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK18-NEXT: [[TMP57:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l142.region_id, i32 2, i8** [[TMP55]], i8** [[TMP56]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.36, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.37, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK18-NEXT: [[TMP58:%.*]] = icmp ne i32 [[TMP57]], 0 // CHECK18-NEXT: br i1 [[TMP58]], label [[OMP_OFFLOAD_FAILED24:%.*]], label [[OMP_OFFLOAD_CONT25:%.*]] // CHECK18: omp_offload.failed24: @@ -23695,11 +23461,11 @@ // CHECK18-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK18-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 // CHECK18-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 -// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK18-NEXT: ret void // // -// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..14 +// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..18 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK18-NEXT: entry: // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -23745,7 +23511,7 @@ // CHECK18: omp.inner.for.body: // CHECK18-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !47 // CHECK18-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !47 -// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]), !llvm.access.group !47 +// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..19 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]), !llvm.access.group !47 // CHECK18-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK18: omp.inner.for.inc: // CHECK18-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47 @@ -23767,7 +23533,7 @@ // CHECK18-NEXT: ret void // // -// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..15 +// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..19 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK18-NEXT: entry: // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -23854,11 +23620,11 @@ // CHECK18-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK18-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 // CHECK18-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 -// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..17 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..22 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK18-NEXT: ret void // // -// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..17 +// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..22 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK18-NEXT: entry: // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -23904,7 +23670,7 @@ // CHECK18: omp.inner.for.body: // CHECK18-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !53 // CHECK18-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !53 -// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]), !llvm.access.group !53 +// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..23 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]), !llvm.access.group !53 // CHECK18-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK18: omp.inner.for.inc: // CHECK18-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !53 @@ -23926,7 +23692,7 @@ // CHECK18-NEXT: ret void // // -// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..18 +// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..23 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK18-NEXT: entry: // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -24022,11 +23788,11 @@ // CHECK18-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 // CHECK18-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK18-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..21 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP3]]) +// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..26 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP3]]) // CHECK18-NEXT: ret void // // -// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..21 +// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..26 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK18-NEXT: entry: // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -24078,7 +23844,7 @@ // CHECK18-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4, !llvm.access.group !59 // CHECK18-NEXT: store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !59 // CHECK18-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !59 -// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..22 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]]), !llvm.access.group !59 +// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..27 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]]), !llvm.access.group !59 // CHECK18-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK18: omp.inner.for.inc: // CHECK18-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !59 @@ -24100,7 +23866,7 @@ // CHECK18-NEXT: ret void // // -// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..22 +// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..27 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK18-NEXT: entry: // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -24209,11 +23975,11 @@ // CHECK18-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK18-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 // CHECK18-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 -// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..25 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..30 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK18-NEXT: ret void // // -// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..25 +// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..30 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK18-NEXT: entry: // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -24259,7 +24025,7 @@ // CHECK18: omp.inner.for.body: // CHECK18-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !65 // CHECK18-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !65 -// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..26 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]), !llvm.access.group !65 +// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..31 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]), !llvm.access.group !65 // CHECK18-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK18: omp.inner.for.inc: // CHECK18-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !65 @@ -24281,7 +24047,7 @@ // CHECK18-NEXT: ret void // // -// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..26 +// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..31 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK18-NEXT: entry: // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -24375,11 +24141,11 @@ // CHECK18-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 // CHECK18-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK18-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..29 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP3]]) +// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..34 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP3]]) // CHECK18-NEXT: ret void // // -// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..29 +// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..34 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK18-NEXT: entry: // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -24431,7 +24197,7 @@ // CHECK18-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4, !llvm.access.group !71 // CHECK18-NEXT: store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !71 // CHECK18-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !71 -// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..30 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]]), !llvm.access.group !71 +// CHECK18-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..35 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]]), !llvm.access.group !71 // CHECK18-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK18: omp.inner.for.inc: // CHECK18-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !71 @@ -24453,7 +24219,7 @@ // CHECK18-NEXT: ret void // // -// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..30 +// CHECK18-LABEL: define {{[^@]+}}@.omp_outlined..35 // CHECK18-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK18-NEXT: entry: // CHECK18-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -24556,7 +24322,6 @@ // CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK19-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK19-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4 -// CHECK19-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 4 // CHECK19-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK19-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK19-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -24564,36 +24329,32 @@ // CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS4:%.*]] = alloca [3 x i8*], align 4 // CHECK19-NEXT: [[DOTOFFLOAD_PTRS5:%.*]] = alloca [3 x i8*], align 4 // CHECK19-NEXT: [[DOTOFFLOAD_MAPPERS6:%.*]] = alloca [3 x i8*], align 4 -// CHECK19-NEXT: [[DOTOFFLOAD_SIZES7:%.*]] = alloca [3 x i64], align 4 -// CHECK19-NEXT: [[_TMP8:%.*]] = alloca i32, align 4 +// CHECK19-NEXT: [[_TMP7:%.*]] = alloca i32, align 4 +// CHECK19-NEXT: [[DOTCAPTURE_EXPR_8:%.*]] = alloca i32, align 4 // CHECK19-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4 -// CHECK19-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4 // CHECK19-NEXT: [[M_CASTED:%.*]] = alloca i32, align 4 -// CHECK19-NEXT: [[N_CASTED17:%.*]] = alloca i32, align 4 -// CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS18:%.*]] = alloca [4 x i8*], align 4 -// CHECK19-NEXT: [[DOTOFFLOAD_PTRS19:%.*]] = alloca [4 x i8*], align 4 -// CHECK19-NEXT: [[DOTOFFLOAD_MAPPERS20:%.*]] = alloca [4 x i8*], align 4 -// CHECK19-NEXT: [[DOTOFFLOAD_SIZES21:%.*]] = alloca [4 x i64], align 4 -// CHECK19-NEXT: [[_TMP22:%.*]] = alloca i32, align 4 -// CHECK19-NEXT: [[DOTCAPTURE_EXPR_23:%.*]] = alloca i32, align 4 -// CHECK19-NEXT: [[DOTCAPTURE_EXPR_24:%.*]] = alloca i32, align 4 -// CHECK19-NEXT: [[N_CASTED31:%.*]] = alloca i32, align 4 -// CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS32:%.*]] = alloca [3 x i8*], align 4 -// CHECK19-NEXT: [[DOTOFFLOAD_PTRS33:%.*]] = alloca [3 x i8*], align 4 -// CHECK19-NEXT: [[DOTOFFLOAD_MAPPERS34:%.*]] = alloca [3 x i8*], align 4 -// CHECK19-NEXT: [[DOTOFFLOAD_SIZES35:%.*]] = alloca [3 x i64], align 4 -// CHECK19-NEXT: [[_TMP36:%.*]] = alloca i32, align 4 -// CHECK19-NEXT: [[DOTCAPTURE_EXPR_37:%.*]] = alloca i32, align 4 -// CHECK19-NEXT: [[DOTCAPTURE_EXPR_38:%.*]] = alloca i32, align 4 -// CHECK19-NEXT: [[M_CASTED45:%.*]] = alloca i32, align 4 -// CHECK19-NEXT: [[N_CASTED46:%.*]] = alloca i32, align 4 -// CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS47:%.*]] = alloca [4 x i8*], align 4 -// CHECK19-NEXT: [[DOTOFFLOAD_PTRS48:%.*]] = alloca [4 x i8*], align 4 -// CHECK19-NEXT: 
[[DOTOFFLOAD_MAPPERS49:%.*]] = alloca [4 x i8*], align 4 -// CHECK19-NEXT: [[DOTOFFLOAD_SIZES50:%.*]] = alloca [4 x i64], align 4 -// CHECK19-NEXT: [[_TMP51:%.*]] = alloca i32, align 4 -// CHECK19-NEXT: [[DOTCAPTURE_EXPR_52:%.*]] = alloca i32, align 4 -// CHECK19-NEXT: [[DOTCAPTURE_EXPR_53:%.*]] = alloca i32, align 4 +// CHECK19-NEXT: [[N_CASTED16:%.*]] = alloca i32, align 4 +// CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS17:%.*]] = alloca [4 x i8*], align 4 +// CHECK19-NEXT: [[DOTOFFLOAD_PTRS18:%.*]] = alloca [4 x i8*], align 4 +// CHECK19-NEXT: [[DOTOFFLOAD_MAPPERS19:%.*]] = alloca [4 x i8*], align 4 +// CHECK19-NEXT: [[_TMP20:%.*]] = alloca i32, align 4 +// CHECK19-NEXT: [[DOTCAPTURE_EXPR_21:%.*]] = alloca i32, align 4 +// CHECK19-NEXT: [[DOTCAPTURE_EXPR_22:%.*]] = alloca i32, align 4 +// CHECK19-NEXT: [[N_CASTED29:%.*]] = alloca i32, align 4 +// CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS30:%.*]] = alloca [3 x i8*], align 4 +// CHECK19-NEXT: [[DOTOFFLOAD_PTRS31:%.*]] = alloca [3 x i8*], align 4 +// CHECK19-NEXT: [[DOTOFFLOAD_MAPPERS32:%.*]] = alloca [3 x i8*], align 4 +// CHECK19-NEXT: [[_TMP33:%.*]] = alloca i32, align 4 +// CHECK19-NEXT: [[DOTCAPTURE_EXPR_34:%.*]] = alloca i32, align 4 +// CHECK19-NEXT: [[DOTCAPTURE_EXPR_35:%.*]] = alloca i32, align 4 +// CHECK19-NEXT: [[M_CASTED42:%.*]] = alloca i32, align 4 +// CHECK19-NEXT: [[N_CASTED43:%.*]] = alloca i32, align 4 +// CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS44:%.*]] = alloca [4 x i8*], align 4 +// CHECK19-NEXT: [[DOTOFFLOAD_PTRS45:%.*]] = alloca [4 x i8*], align 4 +// CHECK19-NEXT: [[DOTOFFLOAD_MAPPERS46:%.*]] = alloca [4 x i8*], align 4 +// CHECK19-NEXT: [[_TMP47:%.*]] = alloca i32, align 4 +// CHECK19-NEXT: [[DOTCAPTURE_EXPR_48:%.*]] = alloca i32, align 4 +// CHECK19-NEXT: [[DOTCAPTURE_EXPR_49:%.*]] = alloca i32, align 4 // CHECK19-NEXT: store i32 0, i32* [[RETVAL]], align 4 // CHECK19-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 // CHECK19-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 4 @@ -24615,308 +24376,274 @@ // CHECK19-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK19-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i32* // CHECK19-NEXT: store i32 [[TMP3]], i32* [[TMP9]], align 4 -// CHECK19-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK19-NEXT: store i64 4, i64* [[TMP10]], align 4 -// CHECK19-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK19-NEXT: store i8* null, i8** [[TMP11]], align 4 -// CHECK19-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK19-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32* -// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP13]], align 4 -// CHECK19-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK19-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32* -// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP15]], align 4 -// CHECK19-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK19-NEXT: store i64 4, i64* [[TMP16]], align 4 -// CHECK19-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK19-NEXT: store i8* null, i8** [[TMP17]], align 4 -// CHECK19-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK19-NEXT: 
[[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK19-NEXT: store i8* null, i8** [[TMP10]], align 4 +// CHECK19-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK19-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i32* +// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP12]], align 4 +// CHECK19-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK19-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i32* +// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP14]], align 4 +// CHECK19-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK19-NEXT: store i8* null, i8** [[TMP15]], align 4 +// CHECK19-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK19-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** +// CHECK19-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 4 +// CHECK19-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK19-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** // CHECK19-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 4 -// CHECK19-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK19-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** -// CHECK19-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 4 -// CHECK19-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK19-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 4 -// CHECK19-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK19-NEXT: store i8* null, i8** [[TMP23]], align 4 -// CHECK19-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4 -// CHECK19-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK19-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK19-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 +// CHECK19-NEXT: store i64 [[TMP5]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 2), align 4 +// CHECK19-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK19-NEXT: store i8* null, i8** [[TMP20]], align 4 +// CHECK19-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP23:%.*]] = load i32, i32* [[N]], align 4 +// CHECK19-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK19-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK19-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK19-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK19-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK19-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK19-NEXT: [[TMP29:%.*]] = load i32, i32* 
[[DOTCAPTURE_EXPR_1]], align 4 -// CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1 -// CHECK19-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 -// CHECK19-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP30]]) -// CHECK19-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l154.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK19-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 -// CHECK19-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK19-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK19-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], 1 +// CHECK19-NEXT: [[TMP26:%.*]] = zext i32 [[ADD]] to i64 +// CHECK19-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP26]]) +// CHECK19-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l154.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK19-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK19-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK19: omp_offload.failed: // CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l154(i32 [[TMP3]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK19: omp_offload.cont: -// CHECK19-NEXT: [[TMP33:%.*]] = load i32, i32* [[N]], align 4 -// CHECK19-NEXT: store i32 [[TMP33]], i32* [[N_CASTED3]], align 4 -// CHECK19-NEXT: [[TMP34:%.*]] = load i32, i32* [[N_CASTED3]], align 4 -// CHECK19-NEXT: [[TMP35:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK19-NEXT: [[TMP36:%.*]] = sext i32 [[TMP35]] to i64 -// CHECK19-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i32* -// CHECK19-NEXT: store i32 [[TMP34]], i32* [[TMP38]], align 4 -// CHECK19-NEXT: [[TMP39:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i32* -// CHECK19-NEXT: store i32 [[TMP34]], i32* [[TMP40]], align 4 -// CHECK19-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 0 -// CHECK19-NEXT: store i64 4, i64* [[TMP41]], align 4 -// CHECK19-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP29:%.*]] = load i32, i32* [[N]], align 4 +// CHECK19-NEXT: store i32 [[TMP29]], i32* [[N_CASTED3]], align 4 +// CHECK19-NEXT: [[TMP30:%.*]] = load i32, i32* [[N_CASTED3]], align 4 +// CHECK19-NEXT: [[TMP31:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK19-NEXT: [[TMP32:%.*]] = sext i32 [[TMP31]] to i64 +// CHECK19-NEXT: [[TMP33:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i32* +// CHECK19-NEXT: store i32 
[[TMP30]], i32* [[TMP34]], align 4 +// CHECK19-NEXT: [[TMP35:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP36:%.*]] = bitcast i8** [[TMP35]] to i32* +// CHECK19-NEXT: store i32 [[TMP30]], i32* [[TMP36]], align 4 +// CHECK19-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0 +// CHECK19-NEXT: store i8* null, i8** [[TMP37]], align 4 +// CHECK19-NEXT: [[TMP38:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1 +// CHECK19-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i32* +// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP39]], align 4 +// CHECK19-NEXT: [[TMP40:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 1 +// CHECK19-NEXT: [[TMP41:%.*]] = bitcast i8** [[TMP40]] to i32* +// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP41]], align 4 +// CHECK19-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1 // CHECK19-NEXT: store i8* null, i8** [[TMP42]], align 4 -// CHECK19-NEXT: [[TMP43:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1 -// CHECK19-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32* -// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP44]], align 4 -// CHECK19-NEXT: [[TMP45:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 1 -// CHECK19-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32* -// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP46]], align 4 -// CHECK19-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 1 -// CHECK19-NEXT: store i64 4, i64* [[TMP47]], align 4 -// CHECK19-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1 -// CHECK19-NEXT: store i8* null, i8** [[TMP48]], align 4 -// CHECK19-NEXT: [[TMP49:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2 -// CHECK19-NEXT: [[TMP50:%.*]] = bitcast i8** [[TMP49]] to i32** -// CHECK19-NEXT: store i32* [[VLA]], i32** [[TMP50]], align 4 -// CHECK19-NEXT: [[TMP51:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 2 -// CHECK19-NEXT: [[TMP52:%.*]] = bitcast i8** [[TMP51]] to i32** -// CHECK19-NEXT: store i32* [[VLA]], i32** [[TMP52]], align 4 -// CHECK19-NEXT: [[TMP53:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 2 -// CHECK19-NEXT: store i64 [[TMP36]], i64* [[TMP53]], align 4 -// CHECK19-NEXT: [[TMP54:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 2 -// CHECK19-NEXT: store i8* null, i8** [[TMP54]], align 4 -// CHECK19-NEXT: [[TMP55:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP56:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP57:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP43:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2 +// CHECK19-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32** +// CHECK19-NEXT: store i32* [[VLA]], i32** [[TMP44]], align 4 +// CHECK19-NEXT: [[TMP45:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 2 +// CHECK19-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to 
i32** +// CHECK19-NEXT: store i32* [[VLA]], i32** [[TMP46]], align 4 +// CHECK19-NEXT: store i64 [[TMP32]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 2), align 4 +// CHECK19-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 2 +// CHECK19-NEXT: store i8* null, i8** [[TMP47]], align 4 +// CHECK19-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP49:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP50:%.*]] = load i32, i32* [[N]], align 4 +// CHECK19-NEXT: store i32 [[TMP50]], i32* [[DOTCAPTURE_EXPR_8]], align 4 +// CHECK19-NEXT: [[TMP51:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_8]], align 4 +// CHECK19-NEXT: [[SUB10:%.*]] = sub nsw i32 [[TMP51]], 0 +// CHECK19-NEXT: [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1 +// CHECK19-NEXT: [[SUB12:%.*]] = sub nsw i32 [[DIV11]], 1 +// CHECK19-NEXT: store i32 [[SUB12]], i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK19-NEXT: [[TMP52:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK19-NEXT: [[ADD13:%.*]] = add nsw i32 [[TMP52]], 1 +// CHECK19-NEXT: [[TMP53:%.*]] = zext i32 [[ADD13]] to i64 +// CHECK19-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP53]]) +// CHECK19-NEXT: [[TMP54:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159.region_id, i32 3, i8** [[TMP48]], i8** [[TMP49]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK19-NEXT: [[TMP55:%.*]] = icmp ne i32 [[TMP54]], 0 +// CHECK19-NEXT: br i1 [[TMP55]], label [[OMP_OFFLOAD_FAILED14:%.*]], label [[OMP_OFFLOAD_CONT15:%.*]] +// CHECK19: omp_offload.failed14: +// CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159(i32 [[TMP30]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] +// CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT15]] +// CHECK19: omp_offload.cont15: +// CHECK19-NEXT: [[TMP56:%.*]] = load i32, i32* [[M]], align 4 +// CHECK19-NEXT: store i32 [[TMP56]], i32* [[M_CASTED]], align 4 +// CHECK19-NEXT: [[TMP57:%.*]] = load i32, i32* [[M_CASTED]], align 4 // CHECK19-NEXT: [[TMP58:%.*]] = load i32, i32* [[N]], align 4 -// CHECK19-NEXT: store i32 [[TMP58]], i32* [[DOTCAPTURE_EXPR_9]], align 4 -// CHECK19-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 -// CHECK19-NEXT: [[SUB11:%.*]] = sub nsw i32 [[TMP59]], 0 -// CHECK19-NEXT: [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1 -// CHECK19-NEXT: [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1 -// CHECK19-NEXT: store i32 [[SUB13]], i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK19-NEXT: [[TMP60:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK19-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP60]], 1 -// CHECK19-NEXT: [[TMP61:%.*]] = zext i32 [[ADD14]] to i64 -// CHECK19-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP61]]) -// CHECK19-NEXT: [[TMP62:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159.region_id, i32 3, i8** [[TMP55]], i8** [[TMP56]], i64* [[TMP57]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** 
null, i32 0, i32 0) -// CHECK19-NEXT: [[TMP63:%.*]] = icmp ne i32 [[TMP62]], 0 -// CHECK19-NEXT: br i1 [[TMP63]], label [[OMP_OFFLOAD_FAILED15:%.*]], label [[OMP_OFFLOAD_CONT16:%.*]] -// CHECK19: omp_offload.failed15: -// CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159(i32 [[TMP34]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] -// CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT16]] -// CHECK19: omp_offload.cont16: -// CHECK19-NEXT: [[TMP64:%.*]] = load i32, i32* [[M]], align 4 -// CHECK19-NEXT: store i32 [[TMP64]], i32* [[M_CASTED]], align 4 -// CHECK19-NEXT: [[TMP65:%.*]] = load i32, i32* [[M_CASTED]], align 4 -// CHECK19-NEXT: [[TMP66:%.*]] = load i32, i32* [[N]], align 4 -// CHECK19-NEXT: store i32 [[TMP66]], i32* [[N_CASTED17]], align 4 -// CHECK19-NEXT: [[TMP67:%.*]] = load i32, i32* [[N_CASTED17]], align 4 -// CHECK19-NEXT: [[TMP68:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK19-NEXT: [[TMP69:%.*]] = sext i32 [[TMP68]] to i64 -// CHECK19-NEXT: [[TMP70:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP71:%.*]] = bitcast i8** [[TMP70]] to i32* -// CHECK19-NEXT: store i32 [[TMP65]], i32* [[TMP71]], align 4 -// CHECK19-NEXT: [[TMP72:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 0 +// CHECK19-NEXT: store i32 [[TMP58]], i32* [[N_CASTED16]], align 4 +// CHECK19-NEXT: [[TMP59:%.*]] = load i32, i32* [[N_CASTED16]], align 4 +// CHECK19-NEXT: [[TMP60:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK19-NEXT: [[TMP61:%.*]] = sext i32 [[TMP60]] to i64 +// CHECK19-NEXT: [[TMP62:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP63:%.*]] = bitcast i8** [[TMP62]] to i32* +// CHECK19-NEXT: store i32 [[TMP57]], i32* [[TMP63]], align 4 +// CHECK19-NEXT: [[TMP64:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP65:%.*]] = bitcast i8** [[TMP64]] to i32* +// CHECK19-NEXT: store i32 [[TMP57]], i32* [[TMP65]], align 4 +// CHECK19-NEXT: [[TMP66:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 0 +// CHECK19-NEXT: store i8* null, i8** [[TMP66]], align 4 +// CHECK19-NEXT: [[TMP67:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 1 +// CHECK19-NEXT: [[TMP68:%.*]] = bitcast i8** [[TMP67]] to i32* +// CHECK19-NEXT: store i32 [[TMP59]], i32* [[TMP68]], align 4 +// CHECK19-NEXT: [[TMP69:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 1 +// CHECK19-NEXT: [[TMP70:%.*]] = bitcast i8** [[TMP69]] to i32* +// CHECK19-NEXT: store i32 [[TMP59]], i32* [[TMP70]], align 4 +// CHECK19-NEXT: [[TMP71:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 1 +// CHECK19-NEXT: store i8* null, i8** [[TMP71]], align 4 +// CHECK19-NEXT: [[TMP72:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 2 // CHECK19-NEXT: [[TMP73:%.*]] = bitcast i8** [[TMP72]] to i32* -// CHECK19-NEXT: store i32 [[TMP65]], i32* [[TMP73]], align 4 -// CHECK19-NEXT: [[TMP74:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES21]], i32 0, i32 0 -// CHECK19-NEXT: store i64 4, i64* [[TMP74]], align 4 -// CHECK19-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 0 -// CHECK19-NEXT: store i8* null, i8** [[TMP75]], align 4 -// CHECK19-NEXT: [[TMP76:%.*]] = getelementptr 
inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 1 -// CHECK19-NEXT: [[TMP77:%.*]] = bitcast i8** [[TMP76]] to i32* -// CHECK19-NEXT: store i32 [[TMP67]], i32* [[TMP77]], align 4 -// CHECK19-NEXT: [[TMP78:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 1 -// CHECK19-NEXT: [[TMP79:%.*]] = bitcast i8** [[TMP78]] to i32* -// CHECK19-NEXT: store i32 [[TMP67]], i32* [[TMP79]], align 4 -// CHECK19-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES21]], i32 0, i32 1 -// CHECK19-NEXT: store i64 4, i64* [[TMP80]], align 4 -// CHECK19-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 1 +// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP73]], align 4 +// CHECK19-NEXT: [[TMP74:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 2 +// CHECK19-NEXT: [[TMP75:%.*]] = bitcast i8** [[TMP74]] to i32* +// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP75]], align 4 +// CHECK19-NEXT: [[TMP76:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 2 +// CHECK19-NEXT: store i8* null, i8** [[TMP76]], align 4 +// CHECK19-NEXT: [[TMP77:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 3 +// CHECK19-NEXT: [[TMP78:%.*]] = bitcast i8** [[TMP77]] to i32** +// CHECK19-NEXT: store i32* [[VLA]], i32** [[TMP78]], align 4 +// CHECK19-NEXT: [[TMP79:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 3 +// CHECK19-NEXT: [[TMP80:%.*]] = bitcast i8** [[TMP79]] to i32** +// CHECK19-NEXT: store i32* [[VLA]], i32** [[TMP80]], align 4 +// CHECK19-NEXT: store i64 [[TMP61]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 3), align 4 +// CHECK19-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 3 // CHECK19-NEXT: store i8* null, i8** [[TMP81]], align 4 -// CHECK19-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 2 -// CHECK19-NEXT: [[TMP83:%.*]] = bitcast i8** [[TMP82]] to i32* -// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP83]], align 4 -// CHECK19-NEXT: [[TMP84:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 2 -// CHECK19-NEXT: [[TMP85:%.*]] = bitcast i8** [[TMP84]] to i32* -// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP85]], align 4 -// CHECK19-NEXT: [[TMP86:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES21]], i32 0, i32 2 -// CHECK19-NEXT: store i64 4, i64* [[TMP86]], align 4 -// CHECK19-NEXT: [[TMP87:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 2 -// CHECK19-NEXT: store i8* null, i8** [[TMP87]], align 4 -// CHECK19-NEXT: [[TMP88:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 3 -// CHECK19-NEXT: [[TMP89:%.*]] = bitcast i8** [[TMP88]] to i32** -// CHECK19-NEXT: store i32* [[VLA]], i32** [[TMP89]], align 4 -// CHECK19-NEXT: [[TMP90:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 3 -// CHECK19-NEXT: [[TMP91:%.*]] = bitcast i8** [[TMP90]] to i32** -// CHECK19-NEXT: store i32* [[VLA]], i32** [[TMP91]], align 4 -// CHECK19-NEXT: [[TMP92:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES21]], i32 0, i32 3 -// CHECK19-NEXT: store i64 [[TMP69]], i64* [[TMP92]], align 4 -// CHECK19-NEXT: [[TMP93:%.*]] = 
getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 3 -// CHECK19-NEXT: store i8* null, i8** [[TMP93]], align 4 -// CHECK19-NEXT: [[TMP94:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP95:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP96:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES21]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP97:%.*]] = load i32, i32* [[N]], align 4 -// CHECK19-NEXT: store i32 [[TMP97]], i32* [[DOTCAPTURE_EXPR_23]], align 4 -// CHECK19-NEXT: [[TMP98:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_23]], align 4 -// CHECK19-NEXT: [[SUB25:%.*]] = sub nsw i32 [[TMP98]], 0 -// CHECK19-NEXT: [[DIV26:%.*]] = sdiv i32 [[SUB25]], 1 -// CHECK19-NEXT: [[SUB27:%.*]] = sub nsw i32 [[DIV26]], 1 -// CHECK19-NEXT: store i32 [[SUB27]], i32* [[DOTCAPTURE_EXPR_24]], align 4 -// CHECK19-NEXT: [[TMP99:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_24]], align 4 -// CHECK19-NEXT: [[ADD28:%.*]] = add nsw i32 [[TMP99]], 1 -// CHECK19-NEXT: [[TMP100:%.*]] = zext i32 [[ADD28]] to i64 -// CHECK19-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP100]]) -// CHECK19-NEXT: [[TMP101:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l164.region_id, i32 4, i8** [[TMP94]], i8** [[TMP95]], i64* [[TMP96]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.7, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK19-NEXT: [[TMP102:%.*]] = icmp ne i32 [[TMP101]], 0 -// CHECK19-NEXT: br i1 [[TMP102]], label [[OMP_OFFLOAD_FAILED29:%.*]], label [[OMP_OFFLOAD_CONT30:%.*]] -// CHECK19: omp_offload.failed29: -// CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l164(i32 [[TMP65]], i32 [[TMP67]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] -// CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT30]] -// CHECK19: omp_offload.cont30: -// CHECK19-NEXT: [[TMP103:%.*]] = load i32, i32* [[N]], align 4 -// CHECK19-NEXT: store i32 [[TMP103]], i32* [[N_CASTED31]], align 4 -// CHECK19-NEXT: [[TMP104:%.*]] = load i32, i32* [[N_CASTED31]], align 4 -// CHECK19-NEXT: [[TMP105:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK19-NEXT: [[TMP106:%.*]] = sext i32 [[TMP105]] to i64 -// CHECK19-NEXT: [[TMP107:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS32]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP108:%.*]] = bitcast i8** [[TMP107]] to i32* -// CHECK19-NEXT: store i32 [[TMP104]], i32* [[TMP108]], align 4 -// CHECK19-NEXT: [[TMP109:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS33]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP110:%.*]] = bitcast i8** [[TMP109]] to i32* -// CHECK19-NEXT: store i32 [[TMP104]], i32* [[TMP110]], align 4 -// CHECK19-NEXT: [[TMP111:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES35]], i32 0, i32 0 -// CHECK19-NEXT: store i64 4, i64* [[TMP111]], align 4 -// CHECK19-NEXT: [[TMP112:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS34]], i32 0, i32 0 -// CHECK19-NEXT: store i8* null, i8** [[TMP112]], align 4 -// CHECK19-NEXT: [[TMP113:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS32]], i32 0, i32 1 -// CHECK19-NEXT: [[TMP114:%.*]] = bitcast i8** [[TMP113]] to i32* -// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP114]], align 4 -// CHECK19-NEXT: [[TMP115:%.*]] = getelementptr 
inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS33]], i32 0, i32 1 -// CHECK19-NEXT: [[TMP116:%.*]] = bitcast i8** [[TMP115]] to i32* -// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP116]], align 4 -// CHECK19-NEXT: [[TMP117:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES35]], i32 0, i32 1 -// CHECK19-NEXT: store i64 4, i64* [[TMP117]], align 4 -// CHECK19-NEXT: [[TMP118:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS34]], i32 0, i32 1 -// CHECK19-NEXT: store i8* null, i8** [[TMP118]], align 4 -// CHECK19-NEXT: [[TMP119:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS32]], i32 0, i32 2 -// CHECK19-NEXT: [[TMP120:%.*]] = bitcast i8** [[TMP119]] to i32** -// CHECK19-NEXT: store i32* [[VLA]], i32** [[TMP120]], align 4 -// CHECK19-NEXT: [[TMP121:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS33]], i32 0, i32 2 -// CHECK19-NEXT: [[TMP122:%.*]] = bitcast i8** [[TMP121]] to i32** -// CHECK19-NEXT: store i32* [[VLA]], i32** [[TMP122]], align 4 -// CHECK19-NEXT: [[TMP123:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES35]], i32 0, i32 2 -// CHECK19-NEXT: store i64 [[TMP106]], i64* [[TMP123]], align 4 -// CHECK19-NEXT: [[TMP124:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS34]], i32 0, i32 2 -// CHECK19-NEXT: store i8* null, i8** [[TMP124]], align 4 -// CHECK19-NEXT: [[TMP125:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS32]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP126:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS33]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP127:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES35]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP128:%.*]] = load i32, i32* [[N]], align 4 -// CHECK19-NEXT: store i32 [[TMP128]], i32* [[DOTCAPTURE_EXPR_37]], align 4 -// CHECK19-NEXT: [[TMP129:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_37]], align 4 -// CHECK19-NEXT: [[SUB39:%.*]] = sub nsw i32 [[TMP129]], 0 -// CHECK19-NEXT: [[DIV40:%.*]] = sdiv i32 [[SUB39]], 1 -// CHECK19-NEXT: [[SUB41:%.*]] = sub nsw i32 [[DIV40]], 1 -// CHECK19-NEXT: store i32 [[SUB41]], i32* [[DOTCAPTURE_EXPR_38]], align 4 -// CHECK19-NEXT: [[TMP130:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_38]], align 4 -// CHECK19-NEXT: [[ADD42:%.*]] = add nsw i32 [[TMP130]], 1 -// CHECK19-NEXT: [[TMP131:%.*]] = zext i32 [[ADD42]] to i64 -// CHECK19-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP131]]) -// CHECK19-NEXT: [[TMP132:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l169.region_id, i32 3, i8** [[TMP125]], i8** [[TMP126]], i64* [[TMP127]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK19-NEXT: [[TMP133:%.*]] = icmp ne i32 [[TMP132]], 0 -// CHECK19-NEXT: br i1 [[TMP133]], label [[OMP_OFFLOAD_FAILED43:%.*]], label [[OMP_OFFLOAD_CONT44:%.*]] -// CHECK19: omp_offload.failed43: -// CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l169(i32 [[TMP104]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] -// CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT44]] -// CHECK19: omp_offload.cont44: -// CHECK19-NEXT: [[TMP134:%.*]] = load i32, i32* [[M]], align 4 -// CHECK19-NEXT: store i32 [[TMP134]], i32* [[M_CASTED45]], align 4 -// CHECK19-NEXT: [[TMP135:%.*]] = load i32, i32* [[M_CASTED45]], align 4 -// 
CHECK19-NEXT: [[TMP136:%.*]] = load i32, i32* [[N]], align 4 -// CHECK19-NEXT: store i32 [[TMP136]], i32* [[N_CASTED46]], align 4 -// CHECK19-NEXT: [[TMP137:%.*]] = load i32, i32* [[N_CASTED46]], align 4 -// CHECK19-NEXT: [[TMP138:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK19-NEXT: [[TMP139:%.*]] = sext i32 [[TMP138]] to i64 -// CHECK19-NEXT: [[TMP140:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS47]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP141:%.*]] = bitcast i8** [[TMP140]] to i32* -// CHECK19-NEXT: store i32 [[TMP135]], i32* [[TMP141]], align 4 -// CHECK19-NEXT: [[TMP142:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS48]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP143:%.*]] = bitcast i8** [[TMP142]] to i32* -// CHECK19-NEXT: store i32 [[TMP135]], i32* [[TMP143]], align 4 -// CHECK19-NEXT: [[TMP144:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES50]], i32 0, i32 0 -// CHECK19-NEXT: store i64 4, i64* [[TMP144]], align 4 -// CHECK19-NEXT: [[TMP145:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS49]], i32 0, i32 0 -// CHECK19-NEXT: store i8* null, i8** [[TMP145]], align 4 -// CHECK19-NEXT: [[TMP146:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS47]], i32 0, i32 1 -// CHECK19-NEXT: [[TMP147:%.*]] = bitcast i8** [[TMP146]] to i32* -// CHECK19-NEXT: store i32 [[TMP137]], i32* [[TMP147]], align 4 -// CHECK19-NEXT: [[TMP148:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS48]], i32 0, i32 1 -// CHECK19-NEXT: [[TMP149:%.*]] = bitcast i8** [[TMP148]] to i32* -// CHECK19-NEXT: store i32 [[TMP137]], i32* [[TMP149]], align 4 -// CHECK19-NEXT: [[TMP150:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES50]], i32 0, i32 1 -// CHECK19-NEXT: store i64 4, i64* [[TMP150]], align 4 -// CHECK19-NEXT: [[TMP151:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS49]], i32 0, i32 1 -// CHECK19-NEXT: store i8* null, i8** [[TMP151]], align 4 -// CHECK19-NEXT: [[TMP152:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS47]], i32 0, i32 2 -// CHECK19-NEXT: [[TMP153:%.*]] = bitcast i8** [[TMP152]] to i32* -// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP153]], align 4 -// CHECK19-NEXT: [[TMP154:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS48]], i32 0, i32 2 -// CHECK19-NEXT: [[TMP155:%.*]] = bitcast i8** [[TMP154]] to i32* -// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP155]], align 4 -// CHECK19-NEXT: [[TMP156:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES50]], i32 0, i32 2 -// CHECK19-NEXT: store i64 4, i64* [[TMP156]], align 4 -// CHECK19-NEXT: [[TMP157:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS49]], i32 0, i32 2 -// CHECK19-NEXT: store i8* null, i8** [[TMP157]], align 4 -// CHECK19-NEXT: [[TMP158:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS47]], i32 0, i32 3 -// CHECK19-NEXT: [[TMP159:%.*]] = bitcast i8** [[TMP158]] to i32** -// CHECK19-NEXT: store i32* [[VLA]], i32** [[TMP159]], align 4 -// CHECK19-NEXT: [[TMP160:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS48]], i32 0, i32 3 -// CHECK19-NEXT: [[TMP161:%.*]] = bitcast i8** [[TMP160]] to i32** -// CHECK19-NEXT: store i32* [[VLA]], i32** [[TMP161]], align 4 -// CHECK19-NEXT: [[TMP162:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES50]], i32 0, i32 3 -// CHECK19-NEXT: store i64 [[TMP139]], i64* [[TMP162]], align 
4 -// CHECK19-NEXT: [[TMP163:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS49]], i32 0, i32 3 -// CHECK19-NEXT: store i8* null, i8** [[TMP163]], align 4 -// CHECK19-NEXT: [[TMP164:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS47]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP165:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS48]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP166:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES50]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP167:%.*]] = load i32, i32* [[N]], align 4 -// CHECK19-NEXT: store i32 [[TMP167]], i32* [[DOTCAPTURE_EXPR_52]], align 4 -// CHECK19-NEXT: [[TMP168:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_52]], align 4 -// CHECK19-NEXT: [[SUB54:%.*]] = sub nsw i32 [[TMP168]], 0 -// CHECK19-NEXT: [[DIV55:%.*]] = sdiv i32 [[SUB54]], 1 -// CHECK19-NEXT: [[SUB56:%.*]] = sub nsw i32 [[DIV55]], 1 -// CHECK19-NEXT: store i32 [[SUB56]], i32* [[DOTCAPTURE_EXPR_53]], align 4 -// CHECK19-NEXT: [[TMP169:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_53]], align 4 -// CHECK19-NEXT: [[ADD57:%.*]] = add nsw i32 [[TMP169]], 1 -// CHECK19-NEXT: [[TMP170:%.*]] = zext i32 [[ADD57]] to i64 -// CHECK19-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP170]]) -// CHECK19-NEXT: [[TMP171:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l174.region_id, i32 4, i8** [[TMP164]], i8** [[TMP165]], i64* [[TMP166]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK19-NEXT: [[TMP172:%.*]] = icmp ne i32 [[TMP171]], 0 -// CHECK19-NEXT: br i1 [[TMP172]], label [[OMP_OFFLOAD_FAILED58:%.*]], label [[OMP_OFFLOAD_CONT59:%.*]] -// CHECK19: omp_offload.failed58: -// CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l174(i32 [[TMP135]], i32 [[TMP137]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] -// CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT59]] -// CHECK19: omp_offload.cont59: -// CHECK19-NEXT: [[TMP173:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 -// CHECK19-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP173]]) +// CHECK19-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP83:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP84:%.*]] = load i32, i32* [[N]], align 4 +// CHECK19-NEXT: store i32 [[TMP84]], i32* [[DOTCAPTURE_EXPR_21]], align 4 +// CHECK19-NEXT: [[TMP85:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_21]], align 4 +// CHECK19-NEXT: [[SUB23:%.*]] = sub nsw i32 [[TMP85]], 0 +// CHECK19-NEXT: [[DIV24:%.*]] = sdiv i32 [[SUB23]], 1 +// CHECK19-NEXT: [[SUB25:%.*]] = sub nsw i32 [[DIV24]], 1 +// CHECK19-NEXT: store i32 [[SUB25]], i32* [[DOTCAPTURE_EXPR_22]], align 4 +// CHECK19-NEXT: [[TMP86:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_22]], align 4 +// CHECK19-NEXT: [[ADD26:%.*]] = add nsw i32 [[TMP86]], 1 +// CHECK19-NEXT: [[TMP87:%.*]] = zext i32 [[ADD26]] to i64 +// CHECK19-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP87]]) +// CHECK19-NEXT: [[TMP88:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l164.region_id, i32 4, i8** [[TMP82]], i8** [[TMP83]], i64* getelementptr inbounds 
([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK19-NEXT: [[TMP89:%.*]] = icmp ne i32 [[TMP88]], 0 +// CHECK19-NEXT: br i1 [[TMP89]], label [[OMP_OFFLOAD_FAILED27:%.*]], label [[OMP_OFFLOAD_CONT28:%.*]] +// CHECK19: omp_offload.failed27: +// CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l164(i32 [[TMP57]], i32 [[TMP59]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] +// CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT28]] +// CHECK19: omp_offload.cont28: +// CHECK19-NEXT: [[TMP90:%.*]] = load i32, i32* [[N]], align 4 +// CHECK19-NEXT: store i32 [[TMP90]], i32* [[N_CASTED29]], align 4 +// CHECK19-NEXT: [[TMP91:%.*]] = load i32, i32* [[N_CASTED29]], align 4 +// CHECK19-NEXT: [[TMP92:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK19-NEXT: [[TMP93:%.*]] = sext i32 [[TMP92]] to i64 +// CHECK19-NEXT: [[TMP94:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS30]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP95:%.*]] = bitcast i8** [[TMP94]] to i32* +// CHECK19-NEXT: store i32 [[TMP91]], i32* [[TMP95]], align 4 +// CHECK19-NEXT: [[TMP96:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS31]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP97:%.*]] = bitcast i8** [[TMP96]] to i32* +// CHECK19-NEXT: store i32 [[TMP91]], i32* [[TMP97]], align 4 +// CHECK19-NEXT: [[TMP98:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS32]], i32 0, i32 0 +// CHECK19-NEXT: store i8* null, i8** [[TMP98]], align 4 +// CHECK19-NEXT: [[TMP99:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS30]], i32 0, i32 1 +// CHECK19-NEXT: [[TMP100:%.*]] = bitcast i8** [[TMP99]] to i32* +// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP100]], align 4 +// CHECK19-NEXT: [[TMP101:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS31]], i32 0, i32 1 +// CHECK19-NEXT: [[TMP102:%.*]] = bitcast i8** [[TMP101]] to i32* +// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP102]], align 4 +// CHECK19-NEXT: [[TMP103:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS32]], i32 0, i32 1 +// CHECK19-NEXT: store i8* null, i8** [[TMP103]], align 4 +// CHECK19-NEXT: [[TMP104:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS30]], i32 0, i32 2 +// CHECK19-NEXT: [[TMP105:%.*]] = bitcast i8** [[TMP104]] to i32** +// CHECK19-NEXT: store i32* [[VLA]], i32** [[TMP105]], align 4 +// CHECK19-NEXT: [[TMP106:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS31]], i32 0, i32 2 +// CHECK19-NEXT: [[TMP107:%.*]] = bitcast i8** [[TMP106]] to i32** +// CHECK19-NEXT: store i32* [[VLA]], i32** [[TMP107]], align 4 +// CHECK19-NEXT: store i64 [[TMP93]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.12, i32 0, i32 2), align 4 +// CHECK19-NEXT: [[TMP108:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS32]], i32 0, i32 2 +// CHECK19-NEXT: store i8* null, i8** [[TMP108]], align 4 +// CHECK19-NEXT: [[TMP109:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS30]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP110:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS31]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP111:%.*]] = load i32, i32* [[N]], align 4 +// CHECK19-NEXT: store i32 [[TMP111]], i32* [[DOTCAPTURE_EXPR_34]], align 4 +// CHECK19-NEXT: [[TMP112:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_34]], align 4 +// 
CHECK19-NEXT: [[SUB36:%.*]] = sub nsw i32 [[TMP112]], 0 +// CHECK19-NEXT: [[DIV37:%.*]] = sdiv i32 [[SUB36]], 1 +// CHECK19-NEXT: [[SUB38:%.*]] = sub nsw i32 [[DIV37]], 1 +// CHECK19-NEXT: store i32 [[SUB38]], i32* [[DOTCAPTURE_EXPR_35]], align 4 +// CHECK19-NEXT: [[TMP113:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_35]], align 4 +// CHECK19-NEXT: [[ADD39:%.*]] = add nsw i32 [[TMP113]], 1 +// CHECK19-NEXT: [[TMP114:%.*]] = zext i32 [[ADD39]] to i64 +// CHECK19-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP114]]) +// CHECK19-NEXT: [[TMP115:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l169.region_id, i32 3, i8** [[TMP109]], i8** [[TMP110]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK19-NEXT: [[TMP116:%.*]] = icmp ne i32 [[TMP115]], 0 +// CHECK19-NEXT: br i1 [[TMP116]], label [[OMP_OFFLOAD_FAILED40:%.*]], label [[OMP_OFFLOAD_CONT41:%.*]] +// CHECK19: omp_offload.failed40: +// CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l169(i32 [[TMP91]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] +// CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT41]] +// CHECK19: omp_offload.cont41: +// CHECK19-NEXT: [[TMP117:%.*]] = load i32, i32* [[M]], align 4 +// CHECK19-NEXT: store i32 [[TMP117]], i32* [[M_CASTED42]], align 4 +// CHECK19-NEXT: [[TMP118:%.*]] = load i32, i32* [[M_CASTED42]], align 4 +// CHECK19-NEXT: [[TMP119:%.*]] = load i32, i32* [[N]], align 4 +// CHECK19-NEXT: store i32 [[TMP119]], i32* [[N_CASTED43]], align 4 +// CHECK19-NEXT: [[TMP120:%.*]] = load i32, i32* [[N_CASTED43]], align 4 +// CHECK19-NEXT: [[TMP121:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK19-NEXT: [[TMP122:%.*]] = sext i32 [[TMP121]] to i64 +// CHECK19-NEXT: [[TMP123:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP124:%.*]] = bitcast i8** [[TMP123]] to i32* +// CHECK19-NEXT: store i32 [[TMP118]], i32* [[TMP124]], align 4 +// CHECK19-NEXT: [[TMP125:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP126:%.*]] = bitcast i8** [[TMP125]] to i32* +// CHECK19-NEXT: store i32 [[TMP118]], i32* [[TMP126]], align 4 +// CHECK19-NEXT: [[TMP127:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS46]], i32 0, i32 0 +// CHECK19-NEXT: store i8* null, i8** [[TMP127]], align 4 +// CHECK19-NEXT: [[TMP128:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 1 +// CHECK19-NEXT: [[TMP129:%.*]] = bitcast i8** [[TMP128]] to i32* +// CHECK19-NEXT: store i32 [[TMP120]], i32* [[TMP129]], align 4 +// CHECK19-NEXT: [[TMP130:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 1 +// CHECK19-NEXT: [[TMP131:%.*]] = bitcast i8** [[TMP130]] to i32* +// CHECK19-NEXT: store i32 [[TMP120]], i32* [[TMP131]], align 4 +// CHECK19-NEXT: [[TMP132:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS46]], i32 0, i32 1 +// CHECK19-NEXT: store i8* null, i8** [[TMP132]], align 4 +// CHECK19-NEXT: [[TMP133:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 2 +// CHECK19-NEXT: [[TMP134:%.*]] = bitcast i8** [[TMP133]] to i32* +// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP134]], align 
4 +// CHECK19-NEXT: [[TMP135:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 2 +// CHECK19-NEXT: [[TMP136:%.*]] = bitcast i8** [[TMP135]] to i32* +// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP136]], align 4 +// CHECK19-NEXT: [[TMP137:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS46]], i32 0, i32 2 +// CHECK19-NEXT: store i8* null, i8** [[TMP137]], align 4 +// CHECK19-NEXT: [[TMP138:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 3 +// CHECK19-NEXT: [[TMP139:%.*]] = bitcast i8** [[TMP138]] to i32** +// CHECK19-NEXT: store i32* [[VLA]], i32** [[TMP139]], align 4 +// CHECK19-NEXT: [[TMP140:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 3 +// CHECK19-NEXT: [[TMP141:%.*]] = bitcast i8** [[TMP140]] to i32** +// CHECK19-NEXT: store i32* [[VLA]], i32** [[TMP141]], align 4 +// CHECK19-NEXT: store i64 [[TMP122]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.16, i32 0, i32 3), align 4 +// CHECK19-NEXT: [[TMP142:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS46]], i32 0, i32 3 +// CHECK19-NEXT: store i8* null, i8** [[TMP142]], align 4 +// CHECK19-NEXT: [[TMP143:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP144:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP145:%.*]] = load i32, i32* [[N]], align 4 +// CHECK19-NEXT: store i32 [[TMP145]], i32* [[DOTCAPTURE_EXPR_48]], align 4 +// CHECK19-NEXT: [[TMP146:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_48]], align 4 +// CHECK19-NEXT: [[SUB50:%.*]] = sub nsw i32 [[TMP146]], 0 +// CHECK19-NEXT: [[DIV51:%.*]] = sdiv i32 [[SUB50]], 1 +// CHECK19-NEXT: [[SUB52:%.*]] = sub nsw i32 [[DIV51]], 1 +// CHECK19-NEXT: store i32 [[SUB52]], i32* [[DOTCAPTURE_EXPR_49]], align 4 +// CHECK19-NEXT: [[TMP147:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_49]], align 4 +// CHECK19-NEXT: [[ADD53:%.*]] = add nsw i32 [[TMP147]], 1 +// CHECK19-NEXT: [[TMP148:%.*]] = zext i32 [[ADD53]] to i64 +// CHECK19-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP148]]) +// CHECK19-NEXT: [[TMP149:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l174.region_id, i32 4, i8** [[TMP143]], i8** [[TMP144]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.16, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK19-NEXT: [[TMP150:%.*]] = icmp ne i32 [[TMP149]], 0 +// CHECK19-NEXT: br i1 [[TMP150]], label [[OMP_OFFLOAD_FAILED54:%.*]], label [[OMP_OFFLOAD_CONT55:%.*]] +// CHECK19: omp_offload.failed54: +// CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l174(i32 [[TMP118]], i32 [[TMP120]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] +// CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT55]] +// CHECK19: omp_offload.cont55: +// CHECK19-NEXT: [[TMP151:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 +// CHECK19-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP151]]) // CHECK19-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK19-NEXT: [[TMP174:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK19-NEXT: call void @llvm.stackrestore(i8* [[TMP174]]) -// CHECK19-NEXT: [[TMP175:%.*]] = load i32, i32* 
[[RETVAL]], align 4 -// CHECK19-NEXT: ret i32 [[TMP175]] +// CHECK19-NEXT: [[TMP152:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK19-NEXT: call void @llvm.stackrestore(i8* [[TMP152]]) +// CHECK19-NEXT: [[TMP153:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK19-NEXT: ret i32 [[TMP153]] // // // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l154 @@ -25399,11 +25126,11 @@ // CHECK19-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 // CHECK19-NEXT: store i32 [[TMP3]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK19-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32, i32*, i32)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP4]]) +// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32, i32*, i32)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP4]]) // CHECK19-NEXT: ret void // // -// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..5 +// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..6 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -25481,7 +25208,7 @@ // CHECK19-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4, !llvm.access.group !29 // CHECK19-NEXT: store i32 [[TMP19]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !29 // CHECK19-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !29 -// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32, i32*, i32)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], i32 [[TMP1]], i32* [[TMP2]], i32 [[TMP20]]), !llvm.access.group !29 +// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32, i32*, i32)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], i32 [[TMP1]], i32* [[TMP2]], i32 [[TMP20]]), !llvm.access.group !29 // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK19: omp.inner.for.inc: // CHECK19-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29 @@ -25535,7 +25262,7 @@ // CHECK19-NEXT: ret void // // -// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..6 +// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..7 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -25662,11 +25389,11 @@ // CHECK19-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4 // CHECK19-NEXT: [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4 // CHECK19-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4 -// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32, i32*)* @.omp_outlined..8 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32 [[TMP0]], i32* [[TMP1]]) +// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32, i32*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32 [[TMP0]], i32* [[TMP1]]) // CHECK19-NEXT: ret void // // -// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..8 +// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..10 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -25736,7 +25463,7 @@ // CHECK19: omp.inner.for.body: // CHECK19-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !35 // CHECK19-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !35 -// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32, i32*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), i32 [[TMP16]], i32 [[TMP17]], i32* [[TMP0]], i32 [[TMP1]], i32* [[TMP2]]), !llvm.access.group !35 +// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32, i32*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP16]], i32 [[TMP17]], i32* [[TMP0]], i32 [[TMP1]], i32* [[TMP2]]), !llvm.access.group !35 // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK19: omp.inner.for.inc: // CHECK19-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35 @@ -25767,7 +25494,7 @@ // CHECK19-NEXT: ret void // // -// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..9 +// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..11 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -25897,11 +25624,11 @@ // CHECK19-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 // CHECK19-NEXT: store i32 [[TMP3]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK19-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32, i32*, i32)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP4]]) +// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32, i32*, i32)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP4]]) // CHECK19-NEXT: ret void // // -// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..14 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -25977,7 +25704,7 @@ // CHECK19-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4, !llvm.access.group !41 // CHECK19-NEXT: store i32 [[TMP18]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !41 // CHECK19-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !41 -// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32, i32*, i32)* @.omp_outlined..12 to void (i32*, i32*, ...)*), i32 [[TMP16]], i32 [[TMP17]], i32* [[TMP0]], i32 [[TMP1]], i32* [[TMP2]], i32 [[TMP19]]), !llvm.access.group !41 +// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32, i32*, i32)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i32 [[TMP16]], i32 [[TMP17]], i32* [[TMP0]], i32 [[TMP1]], i32* [[TMP2]], i32 [[TMP19]]), !llvm.access.group !41 // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK19: omp.inner.for.inc: // CHECK19-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !41 @@ -26008,7 +25735,7 @@ // CHECK19-NEXT: ret void // // -// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..12 +// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..15 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -26162,7 +25889,7 @@ // CHECK19-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK19-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK19-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK19-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l122.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK19-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l122.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.20, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.21, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK19-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK19-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK19: omp_offload.failed: @@ -26180,7 +25907,7 @@ // CHECK19-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0 // CHECK19-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0 // CHECK19-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK19-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l127.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.19, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.20, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK19-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l127.region_id, i32 1, i8** [[TMP14]], i8** 
[[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.24, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.25, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK19-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 // CHECK19-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]] // CHECK19: omp_offload.failed5: @@ -26209,7 +25936,7 @@ // CHECK19-NEXT: [[TMP30:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0 // CHECK19-NEXT: [[TMP31:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0 // CHECK19-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK19-NEXT: [[TMP32:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l132.region_id, i32 2, i8** [[TMP30]], i8** [[TMP31]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.23, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.24, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK19-NEXT: [[TMP32:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l132.region_id, i32 2, i8** [[TMP30]], i8** [[TMP31]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.28, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.29, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK19-NEXT: [[TMP33:%.*]] = icmp ne i32 [[TMP32]], 0 // CHECK19-NEXT: br i1 [[TMP33]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]] // CHECK19: omp_offload.failed11: @@ -26227,7 +25954,7 @@ // CHECK19-NEXT: [[TMP39:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 0 // CHECK19-NEXT: [[TMP40:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 0 // CHECK19-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK19-NEXT: [[TMP41:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l137.region_id, i32 1, i8** [[TMP39]], i8** [[TMP40]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.27, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.28, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK19-NEXT: [[TMP41:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l137.region_id, i32 1, i8** [[TMP39]], i8** [[TMP40]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.32, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.33, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK19-NEXT: [[TMP42:%.*]] = icmp ne i32 [[TMP41]], 0 // CHECK19-NEXT: br i1 [[TMP42]], label [[OMP_OFFLOAD_FAILED17:%.*]], label [[OMP_OFFLOAD_CONT18:%.*]] // CHECK19: omp_offload.failed17: @@ -26256,7 +25983,7 @@ // CHECK19-NEXT: [[TMP55:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0 // CHECK19-NEXT: [[TMP56:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0 // 
CHECK19-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK19-NEXT: [[TMP57:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l142.region_id, i32 2, i8** [[TMP55]], i8** [[TMP56]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.31, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.32, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK19-NEXT: [[TMP57:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l142.region_id, i32 2, i8** [[TMP55]], i8** [[TMP56]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.36, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.37, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK19-NEXT: [[TMP58:%.*]] = icmp ne i32 [[TMP57]], 0 // CHECK19-NEXT: br i1 [[TMP58]], label [[OMP_OFFLOAD_FAILED24:%.*]], label [[OMP_OFFLOAD_CONT25:%.*]] // CHECK19: omp_offload.failed24: @@ -26272,11 +25999,11 @@ // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK19-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 // CHECK19-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 -// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK19-NEXT: ret void // // -// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..14 +// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..18 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -26322,7 +26049,7 @@ // CHECK19: omp.inner.for.body: // CHECK19-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !47 // CHECK19-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !47 -// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]), !llvm.access.group !47 +// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..19 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]), !llvm.access.group !47 // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK19: omp.inner.for.inc: // CHECK19-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47 @@ -26344,7 +26071,7 @@ // CHECK19-NEXT: ret void // // -// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..15 +// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..19 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -26431,11 +26158,11 @@ // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK19-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 // CHECK19-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 -// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..17 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..22 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK19-NEXT: ret void // // -// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..17 +// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..22 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -26481,7 +26208,7 @@ // CHECK19: omp.inner.for.body: // CHECK19-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !53 // CHECK19-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !53 -// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]), !llvm.access.group !53 +// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..23 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]), !llvm.access.group !53 // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK19: omp.inner.for.inc: // CHECK19-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !53 @@ -26503,7 +26230,7 @@ // CHECK19-NEXT: ret void // // -// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..18 +// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..23 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -26599,11 +26326,11 @@ // CHECK19-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 // CHECK19-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK19-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..21 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP3]]) +// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..26 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP3]]) // CHECK19-NEXT: ret void // // -// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..21 +// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..26 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -26655,7 +26382,7 @@ // CHECK19-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4, !llvm.access.group !59 // CHECK19-NEXT: store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !59 // CHECK19-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !59 -// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..22 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]]), !llvm.access.group !59 +// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..27 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]]), !llvm.access.group !59 // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK19: omp.inner.for.inc: // CHECK19-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !59 @@ -26677,7 +26404,7 @@ // CHECK19-NEXT: ret void // // -// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..22 +// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..27 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -26786,11 +26513,11 @@ // CHECK19-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK19-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 // CHECK19-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 -// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..25 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..30 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK19-NEXT: ret void // // -// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..25 +// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..30 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -26836,7 +26563,7 @@ // CHECK19: omp.inner.for.body: // CHECK19-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !65 // CHECK19-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !65 -// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..26 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]), !llvm.access.group !65 +// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..31 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]), !llvm.access.group !65 // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK19: omp.inner.for.inc: // CHECK19-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !65 @@ -26858,7 +26585,7 @@ // CHECK19-NEXT: ret void // // -// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..26 +// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..31 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -26952,11 +26679,11 @@ // CHECK19-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 // CHECK19-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK19-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..29 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP3]]) +// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..34 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP3]]) // CHECK19-NEXT: ret void // // -// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..29 +// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..34 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -27008,7 +26735,7 @@ // CHECK19-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4, !llvm.access.group !71 // CHECK19-NEXT: store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !71 // CHECK19-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !71 -// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..30 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]]), !llvm.access.group !71 +// CHECK19-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..35 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]]), !llvm.access.group !71 // CHECK19-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK19: omp.inner.for.inc: // CHECK19-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !71 @@ -27030,7 +26757,7 @@ // CHECK19-NEXT: ret void // // -// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..30 +// CHECK19-LABEL: define {{[^@]+}}@.omp_outlined..35 // CHECK19-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK19-NEXT: entry: // CHECK19-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -27133,7 +26860,6 @@ // CHECK20-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK20-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK20-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4 -// CHECK20-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 4 // CHECK20-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK20-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK20-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -27141,36 +26867,32 @@ // CHECK20-NEXT: [[DOTOFFLOAD_BASEPTRS4:%.*]] = alloca [3 x i8*], align 4 // CHECK20-NEXT: [[DOTOFFLOAD_PTRS5:%.*]] = alloca [3 x i8*], align 4 // CHECK20-NEXT: [[DOTOFFLOAD_MAPPERS6:%.*]] = alloca [3 x i8*], align 4 -// CHECK20-NEXT: [[DOTOFFLOAD_SIZES7:%.*]] = alloca [3 x i64], align 4 -// CHECK20-NEXT: [[_TMP8:%.*]] = alloca i32, align 4 +// CHECK20-NEXT: [[_TMP7:%.*]] = alloca i32, align 4 +// CHECK20-NEXT: [[DOTCAPTURE_EXPR_8:%.*]] = alloca i32, align 4 // CHECK20-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4 -// CHECK20-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4 // CHECK20-NEXT: [[M_CASTED:%.*]] = alloca i32, align 4 -// CHECK20-NEXT: [[N_CASTED17:%.*]] = alloca i32, align 4 -// CHECK20-NEXT: [[DOTOFFLOAD_BASEPTRS18:%.*]] = alloca [4 x i8*], align 4 -// CHECK20-NEXT: [[DOTOFFLOAD_PTRS19:%.*]] = alloca [4 x i8*], align 4 -// CHECK20-NEXT: [[DOTOFFLOAD_MAPPERS20:%.*]] = alloca [4 x i8*], align 4 -// CHECK20-NEXT: [[DOTOFFLOAD_SIZES21:%.*]] = alloca [4 x i64], align 4 -// CHECK20-NEXT: [[_TMP22:%.*]] = alloca i32, align 4 -// CHECK20-NEXT: [[DOTCAPTURE_EXPR_23:%.*]] = alloca i32, align 4 -// CHECK20-NEXT: [[DOTCAPTURE_EXPR_24:%.*]] = alloca i32, align 4 -// CHECK20-NEXT: [[N_CASTED31:%.*]] = alloca i32, align 4 -// CHECK20-NEXT: [[DOTOFFLOAD_BASEPTRS32:%.*]] = alloca [3 x i8*], align 4 -// CHECK20-NEXT: [[DOTOFFLOAD_PTRS33:%.*]] = alloca [3 x i8*], align 4 -// CHECK20-NEXT: [[DOTOFFLOAD_MAPPERS34:%.*]] = alloca [3 x i8*], align 4 -// CHECK20-NEXT: [[DOTOFFLOAD_SIZES35:%.*]] = alloca [3 x i64], align 4 -// CHECK20-NEXT: [[_TMP36:%.*]] = alloca i32, align 4 -// CHECK20-NEXT: [[DOTCAPTURE_EXPR_37:%.*]] = alloca i32, align 4 -// CHECK20-NEXT: [[DOTCAPTURE_EXPR_38:%.*]] = alloca i32, align 4 -// CHECK20-NEXT: [[M_CASTED45:%.*]] = alloca i32, align 4 -// CHECK20-NEXT: [[N_CASTED46:%.*]] = alloca i32, align 4 -// CHECK20-NEXT: [[DOTOFFLOAD_BASEPTRS47:%.*]] = alloca [4 x i8*], align 4 -// CHECK20-NEXT: [[DOTOFFLOAD_PTRS48:%.*]] = alloca [4 x i8*], align 4 -// CHECK20-NEXT: 
[[DOTOFFLOAD_MAPPERS49:%.*]] = alloca [4 x i8*], align 4 -// CHECK20-NEXT: [[DOTOFFLOAD_SIZES50:%.*]] = alloca [4 x i64], align 4 -// CHECK20-NEXT: [[_TMP51:%.*]] = alloca i32, align 4 -// CHECK20-NEXT: [[DOTCAPTURE_EXPR_52:%.*]] = alloca i32, align 4 -// CHECK20-NEXT: [[DOTCAPTURE_EXPR_53:%.*]] = alloca i32, align 4 +// CHECK20-NEXT: [[N_CASTED16:%.*]] = alloca i32, align 4 +// CHECK20-NEXT: [[DOTOFFLOAD_BASEPTRS17:%.*]] = alloca [4 x i8*], align 4 +// CHECK20-NEXT: [[DOTOFFLOAD_PTRS18:%.*]] = alloca [4 x i8*], align 4 +// CHECK20-NEXT: [[DOTOFFLOAD_MAPPERS19:%.*]] = alloca [4 x i8*], align 4 +// CHECK20-NEXT: [[_TMP20:%.*]] = alloca i32, align 4 +// CHECK20-NEXT: [[DOTCAPTURE_EXPR_21:%.*]] = alloca i32, align 4 +// CHECK20-NEXT: [[DOTCAPTURE_EXPR_22:%.*]] = alloca i32, align 4 +// CHECK20-NEXT: [[N_CASTED29:%.*]] = alloca i32, align 4 +// CHECK20-NEXT: [[DOTOFFLOAD_BASEPTRS30:%.*]] = alloca [3 x i8*], align 4 +// CHECK20-NEXT: [[DOTOFFLOAD_PTRS31:%.*]] = alloca [3 x i8*], align 4 +// CHECK20-NEXT: [[DOTOFFLOAD_MAPPERS32:%.*]] = alloca [3 x i8*], align 4 +// CHECK20-NEXT: [[_TMP33:%.*]] = alloca i32, align 4 +// CHECK20-NEXT: [[DOTCAPTURE_EXPR_34:%.*]] = alloca i32, align 4 +// CHECK20-NEXT: [[DOTCAPTURE_EXPR_35:%.*]] = alloca i32, align 4 +// CHECK20-NEXT: [[M_CASTED42:%.*]] = alloca i32, align 4 +// CHECK20-NEXT: [[N_CASTED43:%.*]] = alloca i32, align 4 +// CHECK20-NEXT: [[DOTOFFLOAD_BASEPTRS44:%.*]] = alloca [4 x i8*], align 4 +// CHECK20-NEXT: [[DOTOFFLOAD_PTRS45:%.*]] = alloca [4 x i8*], align 4 +// CHECK20-NEXT: [[DOTOFFLOAD_MAPPERS46:%.*]] = alloca [4 x i8*], align 4 +// CHECK20-NEXT: [[_TMP47:%.*]] = alloca i32, align 4 +// CHECK20-NEXT: [[DOTCAPTURE_EXPR_48:%.*]] = alloca i32, align 4 +// CHECK20-NEXT: [[DOTCAPTURE_EXPR_49:%.*]] = alloca i32, align 4 // CHECK20-NEXT: store i32 0, i32* [[RETVAL]], align 4 // CHECK20-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 // CHECK20-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 4 @@ -27192,308 +26914,274 @@ // CHECK20-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK20-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i32* // CHECK20-NEXT: store i32 [[TMP3]], i32* [[TMP9]], align 4 -// CHECK20-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK20-NEXT: store i64 4, i64* [[TMP10]], align 4 -// CHECK20-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK20-NEXT: store i8* null, i8** [[TMP11]], align 4 -// CHECK20-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK20-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32* -// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP13]], align 4 -// CHECK20-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK20-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32* -// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP15]], align 4 -// CHECK20-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK20-NEXT: store i64 4, i64* [[TMP16]], align 4 -// CHECK20-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK20-NEXT: store i8* null, i8** [[TMP17]], align 4 -// CHECK20-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK20-NEXT: 
[[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK20-NEXT: store i8* null, i8** [[TMP10]], align 4 +// CHECK20-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK20-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i32* +// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP12]], align 4 +// CHECK20-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK20-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i32* +// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP14]], align 4 +// CHECK20-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK20-NEXT: store i8* null, i8** [[TMP15]], align 4 +// CHECK20-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK20-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** +// CHECK20-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 4 +// CHECK20-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK20-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** // CHECK20-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 4 -// CHECK20-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK20-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** -// CHECK20-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 4 -// CHECK20-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK20-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 4 -// CHECK20-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK20-NEXT: store i8* null, i8** [[TMP23]], align 4 -// CHECK20-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4 -// CHECK20-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK20-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK20-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 +// CHECK20-NEXT: store i64 [[TMP5]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 2), align 4 +// CHECK20-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK20-NEXT: store i8* null, i8** [[TMP20]], align 4 +// CHECK20-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP23:%.*]] = load i32, i32* [[N]], align 4 +// CHECK20-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK20-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK20-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK20-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK20-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK20-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK20-NEXT: [[TMP29:%.*]] = load i32, i32* 
[[DOTCAPTURE_EXPR_1]], align 4 -// CHECK20-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1 -// CHECK20-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 -// CHECK20-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP30]]) -// CHECK20-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l154.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK20-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 -// CHECK20-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK20-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK20-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], 1 +// CHECK20-NEXT: [[TMP26:%.*]] = zext i32 [[ADD]] to i64 +// CHECK20-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3:[0-9]+]], i64 -1, i64 [[TMP26]]) +// CHECK20-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l154.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK20-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK20-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK20: omp_offload.failed: // CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l154(i32 [[TMP3]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK20-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK20: omp_offload.cont: -// CHECK20-NEXT: [[TMP33:%.*]] = load i32, i32* [[N]], align 4 -// CHECK20-NEXT: store i32 [[TMP33]], i32* [[N_CASTED3]], align 4 -// CHECK20-NEXT: [[TMP34:%.*]] = load i32, i32* [[N_CASTED3]], align 4 -// CHECK20-NEXT: [[TMP35:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK20-NEXT: [[TMP36:%.*]] = sext i32 [[TMP35]] to i64 -// CHECK20-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i32* -// CHECK20-NEXT: store i32 [[TMP34]], i32* [[TMP38]], align 4 -// CHECK20-NEXT: [[TMP39:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i32* -// CHECK20-NEXT: store i32 [[TMP34]], i32* [[TMP40]], align 4 -// CHECK20-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 0 -// CHECK20-NEXT: store i64 4, i64* [[TMP41]], align 4 -// CHECK20-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP29:%.*]] = load i32, i32* [[N]], align 4 +// CHECK20-NEXT: store i32 [[TMP29]], i32* [[N_CASTED3]], align 4 +// CHECK20-NEXT: [[TMP30:%.*]] = load i32, i32* [[N_CASTED3]], align 4 +// CHECK20-NEXT: [[TMP31:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK20-NEXT: [[TMP32:%.*]] = sext i32 [[TMP31]] to i64 +// CHECK20-NEXT: [[TMP33:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i32* +// CHECK20-NEXT: store i32 
[[TMP30]], i32* [[TMP34]], align 4 +// CHECK20-NEXT: [[TMP35:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP36:%.*]] = bitcast i8** [[TMP35]] to i32* +// CHECK20-NEXT: store i32 [[TMP30]], i32* [[TMP36]], align 4 +// CHECK20-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0 +// CHECK20-NEXT: store i8* null, i8** [[TMP37]], align 4 +// CHECK20-NEXT: [[TMP38:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1 +// CHECK20-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i32* +// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP39]], align 4 +// CHECK20-NEXT: [[TMP40:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 1 +// CHECK20-NEXT: [[TMP41:%.*]] = bitcast i8** [[TMP40]] to i32* +// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP41]], align 4 +// CHECK20-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1 // CHECK20-NEXT: store i8* null, i8** [[TMP42]], align 4 -// CHECK20-NEXT: [[TMP43:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1 -// CHECK20-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32* -// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP44]], align 4 -// CHECK20-NEXT: [[TMP45:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 1 -// CHECK20-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32* -// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP46]], align 4 -// CHECK20-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 1 -// CHECK20-NEXT: store i64 4, i64* [[TMP47]], align 4 -// CHECK20-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1 -// CHECK20-NEXT: store i8* null, i8** [[TMP48]], align 4 -// CHECK20-NEXT: [[TMP49:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2 -// CHECK20-NEXT: [[TMP50:%.*]] = bitcast i8** [[TMP49]] to i32** -// CHECK20-NEXT: store i32* [[VLA]], i32** [[TMP50]], align 4 -// CHECK20-NEXT: [[TMP51:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 2 -// CHECK20-NEXT: [[TMP52:%.*]] = bitcast i8** [[TMP51]] to i32** -// CHECK20-NEXT: store i32* [[VLA]], i32** [[TMP52]], align 4 -// CHECK20-NEXT: [[TMP53:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 2 -// CHECK20-NEXT: store i64 [[TMP36]], i64* [[TMP53]], align 4 -// CHECK20-NEXT: [[TMP54:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 2 -// CHECK20-NEXT: store i8* null, i8** [[TMP54]], align 4 -// CHECK20-NEXT: [[TMP55:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP56:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP57:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP43:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2 +// CHECK20-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32** +// CHECK20-NEXT: store i32* [[VLA]], i32** [[TMP44]], align 4 +// CHECK20-NEXT: [[TMP45:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 2 +// CHECK20-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to 
i32** +// CHECK20-NEXT: store i32* [[VLA]], i32** [[TMP46]], align 4 +// CHECK20-NEXT: store i64 [[TMP32]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 2), align 4 +// CHECK20-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 2 +// CHECK20-NEXT: store i8* null, i8** [[TMP47]], align 4 +// CHECK20-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP49:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP50:%.*]] = load i32, i32* [[N]], align 4 +// CHECK20-NEXT: store i32 [[TMP50]], i32* [[DOTCAPTURE_EXPR_8]], align 4 +// CHECK20-NEXT: [[TMP51:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_8]], align 4 +// CHECK20-NEXT: [[SUB10:%.*]] = sub nsw i32 [[TMP51]], 0 +// CHECK20-NEXT: [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1 +// CHECK20-NEXT: [[SUB12:%.*]] = sub nsw i32 [[DIV11]], 1 +// CHECK20-NEXT: store i32 [[SUB12]], i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK20-NEXT: [[TMP52:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK20-NEXT: [[ADD13:%.*]] = add nsw i32 [[TMP52]], 1 +// CHECK20-NEXT: [[TMP53:%.*]] = zext i32 [[ADD13]] to i64 +// CHECK20-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP53]]) +// CHECK20-NEXT: [[TMP54:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159.region_id, i32 3, i8** [[TMP48]], i8** [[TMP49]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.4, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.5, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK20-NEXT: [[TMP55:%.*]] = icmp ne i32 [[TMP54]], 0 +// CHECK20-NEXT: br i1 [[TMP55]], label [[OMP_OFFLOAD_FAILED14:%.*]], label [[OMP_OFFLOAD_CONT15:%.*]] +// CHECK20: omp_offload.failed14: +// CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159(i32 [[TMP30]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] +// CHECK20-NEXT: br label [[OMP_OFFLOAD_CONT15]] +// CHECK20: omp_offload.cont15: +// CHECK20-NEXT: [[TMP56:%.*]] = load i32, i32* [[M]], align 4 +// CHECK20-NEXT: store i32 [[TMP56]], i32* [[M_CASTED]], align 4 +// CHECK20-NEXT: [[TMP57:%.*]] = load i32, i32* [[M_CASTED]], align 4 // CHECK20-NEXT: [[TMP58:%.*]] = load i32, i32* [[N]], align 4 -// CHECK20-NEXT: store i32 [[TMP58]], i32* [[DOTCAPTURE_EXPR_9]], align 4 -// CHECK20-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 -// CHECK20-NEXT: [[SUB11:%.*]] = sub nsw i32 [[TMP59]], 0 -// CHECK20-NEXT: [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1 -// CHECK20-NEXT: [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1 -// CHECK20-NEXT: store i32 [[SUB13]], i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK20-NEXT: [[TMP60:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK20-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP60]], 1 -// CHECK20-NEXT: [[TMP61:%.*]] = zext i32 [[ADD14]] to i64 -// CHECK20-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP61]]) -// CHECK20-NEXT: [[TMP62:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159.region_id, i32 3, i8** [[TMP55]], i8** [[TMP56]], i64* [[TMP57]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** 
null, i32 0, i32 0) -// CHECK20-NEXT: [[TMP63:%.*]] = icmp ne i32 [[TMP62]], 0 -// CHECK20-NEXT: br i1 [[TMP63]], label [[OMP_OFFLOAD_FAILED15:%.*]], label [[OMP_OFFLOAD_CONT16:%.*]] -// CHECK20: omp_offload.failed15: -// CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l159(i32 [[TMP34]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] -// CHECK20-NEXT: br label [[OMP_OFFLOAD_CONT16]] -// CHECK20: omp_offload.cont16: -// CHECK20-NEXT: [[TMP64:%.*]] = load i32, i32* [[M]], align 4 -// CHECK20-NEXT: store i32 [[TMP64]], i32* [[M_CASTED]], align 4 -// CHECK20-NEXT: [[TMP65:%.*]] = load i32, i32* [[M_CASTED]], align 4 -// CHECK20-NEXT: [[TMP66:%.*]] = load i32, i32* [[N]], align 4 -// CHECK20-NEXT: store i32 [[TMP66]], i32* [[N_CASTED17]], align 4 -// CHECK20-NEXT: [[TMP67:%.*]] = load i32, i32* [[N_CASTED17]], align 4 -// CHECK20-NEXT: [[TMP68:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK20-NEXT: [[TMP69:%.*]] = sext i32 [[TMP68]] to i64 -// CHECK20-NEXT: [[TMP70:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP71:%.*]] = bitcast i8** [[TMP70]] to i32* -// CHECK20-NEXT: store i32 [[TMP65]], i32* [[TMP71]], align 4 -// CHECK20-NEXT: [[TMP72:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 0 +// CHECK20-NEXT: store i32 [[TMP58]], i32* [[N_CASTED16]], align 4 +// CHECK20-NEXT: [[TMP59:%.*]] = load i32, i32* [[N_CASTED16]], align 4 +// CHECK20-NEXT: [[TMP60:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK20-NEXT: [[TMP61:%.*]] = sext i32 [[TMP60]] to i64 +// CHECK20-NEXT: [[TMP62:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP63:%.*]] = bitcast i8** [[TMP62]] to i32* +// CHECK20-NEXT: store i32 [[TMP57]], i32* [[TMP63]], align 4 +// CHECK20-NEXT: [[TMP64:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP65:%.*]] = bitcast i8** [[TMP64]] to i32* +// CHECK20-NEXT: store i32 [[TMP57]], i32* [[TMP65]], align 4 +// CHECK20-NEXT: [[TMP66:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 0 +// CHECK20-NEXT: store i8* null, i8** [[TMP66]], align 4 +// CHECK20-NEXT: [[TMP67:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 1 +// CHECK20-NEXT: [[TMP68:%.*]] = bitcast i8** [[TMP67]] to i32* +// CHECK20-NEXT: store i32 [[TMP59]], i32* [[TMP68]], align 4 +// CHECK20-NEXT: [[TMP69:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 1 +// CHECK20-NEXT: [[TMP70:%.*]] = bitcast i8** [[TMP69]] to i32* +// CHECK20-NEXT: store i32 [[TMP59]], i32* [[TMP70]], align 4 +// CHECK20-NEXT: [[TMP71:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 1 +// CHECK20-NEXT: store i8* null, i8** [[TMP71]], align 4 +// CHECK20-NEXT: [[TMP72:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 2 // CHECK20-NEXT: [[TMP73:%.*]] = bitcast i8** [[TMP72]] to i32* -// CHECK20-NEXT: store i32 [[TMP65]], i32* [[TMP73]], align 4 -// CHECK20-NEXT: [[TMP74:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES21]], i32 0, i32 0 -// CHECK20-NEXT: store i64 4, i64* [[TMP74]], align 4 -// CHECK20-NEXT: [[TMP75:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 0 -// CHECK20-NEXT: store i8* null, i8** [[TMP75]], align 4 -// CHECK20-NEXT: [[TMP76:%.*]] = getelementptr 
inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 1 -// CHECK20-NEXT: [[TMP77:%.*]] = bitcast i8** [[TMP76]] to i32* -// CHECK20-NEXT: store i32 [[TMP67]], i32* [[TMP77]], align 4 -// CHECK20-NEXT: [[TMP78:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 1 -// CHECK20-NEXT: [[TMP79:%.*]] = bitcast i8** [[TMP78]] to i32* -// CHECK20-NEXT: store i32 [[TMP67]], i32* [[TMP79]], align 4 -// CHECK20-NEXT: [[TMP80:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES21]], i32 0, i32 1 -// CHECK20-NEXT: store i64 4, i64* [[TMP80]], align 4 -// CHECK20-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 1 +// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP73]], align 4 +// CHECK20-NEXT: [[TMP74:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 2 +// CHECK20-NEXT: [[TMP75:%.*]] = bitcast i8** [[TMP74]] to i32* +// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP75]], align 4 +// CHECK20-NEXT: [[TMP76:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 2 +// CHECK20-NEXT: store i8* null, i8** [[TMP76]], align 4 +// CHECK20-NEXT: [[TMP77:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 3 +// CHECK20-NEXT: [[TMP78:%.*]] = bitcast i8** [[TMP77]] to i32** +// CHECK20-NEXT: store i32* [[VLA]], i32** [[TMP78]], align 4 +// CHECK20-NEXT: [[TMP79:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 3 +// CHECK20-NEXT: [[TMP80:%.*]] = bitcast i8** [[TMP79]] to i32** +// CHECK20-NEXT: store i32* [[VLA]], i32** [[TMP80]], align 4 +// CHECK20-NEXT: store i64 [[TMP61]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 3), align 4 +// CHECK20-NEXT: [[TMP81:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 3 // CHECK20-NEXT: store i8* null, i8** [[TMP81]], align 4 -// CHECK20-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 2 -// CHECK20-NEXT: [[TMP83:%.*]] = bitcast i8** [[TMP82]] to i32* -// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP83]], align 4 -// CHECK20-NEXT: [[TMP84:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 2 -// CHECK20-NEXT: [[TMP85:%.*]] = bitcast i8** [[TMP84]] to i32* -// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP85]], align 4 -// CHECK20-NEXT: [[TMP86:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES21]], i32 0, i32 2 -// CHECK20-NEXT: store i64 4, i64* [[TMP86]], align 4 -// CHECK20-NEXT: [[TMP87:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 2 -// CHECK20-NEXT: store i8* null, i8** [[TMP87]], align 4 -// CHECK20-NEXT: [[TMP88:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 3 -// CHECK20-NEXT: [[TMP89:%.*]] = bitcast i8** [[TMP88]] to i32** -// CHECK20-NEXT: store i32* [[VLA]], i32** [[TMP89]], align 4 -// CHECK20-NEXT: [[TMP90:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 3 -// CHECK20-NEXT: [[TMP91:%.*]] = bitcast i8** [[TMP90]] to i32** -// CHECK20-NEXT: store i32* [[VLA]], i32** [[TMP91]], align 4 -// CHECK20-NEXT: [[TMP92:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES21]], i32 0, i32 3 -// CHECK20-NEXT: store i64 [[TMP69]], i64* [[TMP92]], align 4 -// CHECK20-NEXT: [[TMP93:%.*]] = 
getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 3 -// CHECK20-NEXT: store i8* null, i8** [[TMP93]], align 4 -// CHECK20-NEXT: [[TMP94:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP95:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP96:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES21]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP97:%.*]] = load i32, i32* [[N]], align 4 -// CHECK20-NEXT: store i32 [[TMP97]], i32* [[DOTCAPTURE_EXPR_23]], align 4 -// CHECK20-NEXT: [[TMP98:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_23]], align 4 -// CHECK20-NEXT: [[SUB25:%.*]] = sub nsw i32 [[TMP98]], 0 -// CHECK20-NEXT: [[DIV26:%.*]] = sdiv i32 [[SUB25]], 1 -// CHECK20-NEXT: [[SUB27:%.*]] = sub nsw i32 [[DIV26]], 1 -// CHECK20-NEXT: store i32 [[SUB27]], i32* [[DOTCAPTURE_EXPR_24]], align 4 -// CHECK20-NEXT: [[TMP99:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_24]], align 4 -// CHECK20-NEXT: [[ADD28:%.*]] = add nsw i32 [[TMP99]], 1 -// CHECK20-NEXT: [[TMP100:%.*]] = zext i32 [[ADD28]] to i64 -// CHECK20-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP100]]) -// CHECK20-NEXT: [[TMP101:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l164.region_id, i32 4, i8** [[TMP94]], i8** [[TMP95]], i64* [[TMP96]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.7, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK20-NEXT: [[TMP102:%.*]] = icmp ne i32 [[TMP101]], 0 -// CHECK20-NEXT: br i1 [[TMP102]], label [[OMP_OFFLOAD_FAILED29:%.*]], label [[OMP_OFFLOAD_CONT30:%.*]] -// CHECK20: omp_offload.failed29: -// CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l164(i32 [[TMP65]], i32 [[TMP67]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] -// CHECK20-NEXT: br label [[OMP_OFFLOAD_CONT30]] -// CHECK20: omp_offload.cont30: -// CHECK20-NEXT: [[TMP103:%.*]] = load i32, i32* [[N]], align 4 -// CHECK20-NEXT: store i32 [[TMP103]], i32* [[N_CASTED31]], align 4 -// CHECK20-NEXT: [[TMP104:%.*]] = load i32, i32* [[N_CASTED31]], align 4 -// CHECK20-NEXT: [[TMP105:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK20-NEXT: [[TMP106:%.*]] = sext i32 [[TMP105]] to i64 -// CHECK20-NEXT: [[TMP107:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS32]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP108:%.*]] = bitcast i8** [[TMP107]] to i32* -// CHECK20-NEXT: store i32 [[TMP104]], i32* [[TMP108]], align 4 -// CHECK20-NEXT: [[TMP109:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS33]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP110:%.*]] = bitcast i8** [[TMP109]] to i32* -// CHECK20-NEXT: store i32 [[TMP104]], i32* [[TMP110]], align 4 -// CHECK20-NEXT: [[TMP111:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES35]], i32 0, i32 0 -// CHECK20-NEXT: store i64 4, i64* [[TMP111]], align 4 -// CHECK20-NEXT: [[TMP112:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS34]], i32 0, i32 0 -// CHECK20-NEXT: store i8* null, i8** [[TMP112]], align 4 -// CHECK20-NEXT: [[TMP113:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS32]], i32 0, i32 1 -// CHECK20-NEXT: [[TMP114:%.*]] = bitcast i8** [[TMP113]] to i32* -// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP114]], align 4 -// CHECK20-NEXT: [[TMP115:%.*]] = getelementptr 
inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS33]], i32 0, i32 1 -// CHECK20-NEXT: [[TMP116:%.*]] = bitcast i8** [[TMP115]] to i32* -// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP116]], align 4 -// CHECK20-NEXT: [[TMP117:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES35]], i32 0, i32 1 -// CHECK20-NEXT: store i64 4, i64* [[TMP117]], align 4 -// CHECK20-NEXT: [[TMP118:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS34]], i32 0, i32 1 -// CHECK20-NEXT: store i8* null, i8** [[TMP118]], align 4 -// CHECK20-NEXT: [[TMP119:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS32]], i32 0, i32 2 -// CHECK20-NEXT: [[TMP120:%.*]] = bitcast i8** [[TMP119]] to i32** -// CHECK20-NEXT: store i32* [[VLA]], i32** [[TMP120]], align 4 -// CHECK20-NEXT: [[TMP121:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS33]], i32 0, i32 2 -// CHECK20-NEXT: [[TMP122:%.*]] = bitcast i8** [[TMP121]] to i32** -// CHECK20-NEXT: store i32* [[VLA]], i32** [[TMP122]], align 4 -// CHECK20-NEXT: [[TMP123:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES35]], i32 0, i32 2 -// CHECK20-NEXT: store i64 [[TMP106]], i64* [[TMP123]], align 4 -// CHECK20-NEXT: [[TMP124:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS34]], i32 0, i32 2 -// CHECK20-NEXT: store i8* null, i8** [[TMP124]], align 4 -// CHECK20-NEXT: [[TMP125:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS32]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP126:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS33]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP127:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES35]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP128:%.*]] = load i32, i32* [[N]], align 4 -// CHECK20-NEXT: store i32 [[TMP128]], i32* [[DOTCAPTURE_EXPR_37]], align 4 -// CHECK20-NEXT: [[TMP129:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_37]], align 4 -// CHECK20-NEXT: [[SUB39:%.*]] = sub nsw i32 [[TMP129]], 0 -// CHECK20-NEXT: [[DIV40:%.*]] = sdiv i32 [[SUB39]], 1 -// CHECK20-NEXT: [[SUB41:%.*]] = sub nsw i32 [[DIV40]], 1 -// CHECK20-NEXT: store i32 [[SUB41]], i32* [[DOTCAPTURE_EXPR_38]], align 4 -// CHECK20-NEXT: [[TMP130:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_38]], align 4 -// CHECK20-NEXT: [[ADD42:%.*]] = add nsw i32 [[TMP130]], 1 -// CHECK20-NEXT: [[TMP131:%.*]] = zext i32 [[ADD42]] to i64 -// CHECK20-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP131]]) -// CHECK20-NEXT: [[TMP132:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l169.region_id, i32 3, i8** [[TMP125]], i8** [[TMP126]], i64* [[TMP127]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.10, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK20-NEXT: [[TMP133:%.*]] = icmp ne i32 [[TMP132]], 0 -// CHECK20-NEXT: br i1 [[TMP133]], label [[OMP_OFFLOAD_FAILED43:%.*]], label [[OMP_OFFLOAD_CONT44:%.*]] -// CHECK20: omp_offload.failed43: -// CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l169(i32 [[TMP104]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] -// CHECK20-NEXT: br label [[OMP_OFFLOAD_CONT44]] -// CHECK20: omp_offload.cont44: -// CHECK20-NEXT: [[TMP134:%.*]] = load i32, i32* [[M]], align 4 -// CHECK20-NEXT: store i32 [[TMP134]], i32* [[M_CASTED45]], align 4 -// CHECK20-NEXT: [[TMP135:%.*]] = load i32, i32* [[M_CASTED45]], align 4 -// 
CHECK20-NEXT: [[TMP136:%.*]] = load i32, i32* [[N]], align 4 -// CHECK20-NEXT: store i32 [[TMP136]], i32* [[N_CASTED46]], align 4 -// CHECK20-NEXT: [[TMP137:%.*]] = load i32, i32* [[N_CASTED46]], align 4 -// CHECK20-NEXT: [[TMP138:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK20-NEXT: [[TMP139:%.*]] = sext i32 [[TMP138]] to i64 -// CHECK20-NEXT: [[TMP140:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS47]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP141:%.*]] = bitcast i8** [[TMP140]] to i32* -// CHECK20-NEXT: store i32 [[TMP135]], i32* [[TMP141]], align 4 -// CHECK20-NEXT: [[TMP142:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS48]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP143:%.*]] = bitcast i8** [[TMP142]] to i32* -// CHECK20-NEXT: store i32 [[TMP135]], i32* [[TMP143]], align 4 -// CHECK20-NEXT: [[TMP144:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES50]], i32 0, i32 0 -// CHECK20-NEXT: store i64 4, i64* [[TMP144]], align 4 -// CHECK20-NEXT: [[TMP145:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS49]], i32 0, i32 0 -// CHECK20-NEXT: store i8* null, i8** [[TMP145]], align 4 -// CHECK20-NEXT: [[TMP146:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS47]], i32 0, i32 1 -// CHECK20-NEXT: [[TMP147:%.*]] = bitcast i8** [[TMP146]] to i32* -// CHECK20-NEXT: store i32 [[TMP137]], i32* [[TMP147]], align 4 -// CHECK20-NEXT: [[TMP148:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS48]], i32 0, i32 1 -// CHECK20-NEXT: [[TMP149:%.*]] = bitcast i8** [[TMP148]] to i32* -// CHECK20-NEXT: store i32 [[TMP137]], i32* [[TMP149]], align 4 -// CHECK20-NEXT: [[TMP150:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES50]], i32 0, i32 1 -// CHECK20-NEXT: store i64 4, i64* [[TMP150]], align 4 -// CHECK20-NEXT: [[TMP151:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS49]], i32 0, i32 1 -// CHECK20-NEXT: store i8* null, i8** [[TMP151]], align 4 -// CHECK20-NEXT: [[TMP152:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS47]], i32 0, i32 2 -// CHECK20-NEXT: [[TMP153:%.*]] = bitcast i8** [[TMP152]] to i32* -// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP153]], align 4 -// CHECK20-NEXT: [[TMP154:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS48]], i32 0, i32 2 -// CHECK20-NEXT: [[TMP155:%.*]] = bitcast i8** [[TMP154]] to i32* -// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP155]], align 4 -// CHECK20-NEXT: [[TMP156:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES50]], i32 0, i32 2 -// CHECK20-NEXT: store i64 4, i64* [[TMP156]], align 4 -// CHECK20-NEXT: [[TMP157:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS49]], i32 0, i32 2 -// CHECK20-NEXT: store i8* null, i8** [[TMP157]], align 4 -// CHECK20-NEXT: [[TMP158:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS47]], i32 0, i32 3 -// CHECK20-NEXT: [[TMP159:%.*]] = bitcast i8** [[TMP158]] to i32** -// CHECK20-NEXT: store i32* [[VLA]], i32** [[TMP159]], align 4 -// CHECK20-NEXT: [[TMP160:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS48]], i32 0, i32 3 -// CHECK20-NEXT: [[TMP161:%.*]] = bitcast i8** [[TMP160]] to i32** -// CHECK20-NEXT: store i32* [[VLA]], i32** [[TMP161]], align 4 -// CHECK20-NEXT: [[TMP162:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES50]], i32 0, i32 3 -// CHECK20-NEXT: store i64 [[TMP139]], i64* [[TMP162]], align 
4 -// CHECK20-NEXT: [[TMP163:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS49]], i32 0, i32 3 -// CHECK20-NEXT: store i8* null, i8** [[TMP163]], align 4 -// CHECK20-NEXT: [[TMP164:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS47]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP165:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS48]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP166:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES50]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP167:%.*]] = load i32, i32* [[N]], align 4 -// CHECK20-NEXT: store i32 [[TMP167]], i32* [[DOTCAPTURE_EXPR_52]], align 4 -// CHECK20-NEXT: [[TMP168:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_52]], align 4 -// CHECK20-NEXT: [[SUB54:%.*]] = sub nsw i32 [[TMP168]], 0 -// CHECK20-NEXT: [[DIV55:%.*]] = sdiv i32 [[SUB54]], 1 -// CHECK20-NEXT: [[SUB56:%.*]] = sub nsw i32 [[DIV55]], 1 -// CHECK20-NEXT: store i32 [[SUB56]], i32* [[DOTCAPTURE_EXPR_53]], align 4 -// CHECK20-NEXT: [[TMP169:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_53]], align 4 -// CHECK20-NEXT: [[ADD57:%.*]] = add nsw i32 [[TMP169]], 1 -// CHECK20-NEXT: [[TMP170:%.*]] = zext i32 [[ADD57]] to i64 -// CHECK20-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP170]]) -// CHECK20-NEXT: [[TMP171:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l174.region_id, i32 4, i8** [[TMP164]], i8** [[TMP165]], i64* [[TMP166]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK20-NEXT: [[TMP172:%.*]] = icmp ne i32 [[TMP171]], 0 -// CHECK20-NEXT: br i1 [[TMP172]], label [[OMP_OFFLOAD_FAILED58:%.*]], label [[OMP_OFFLOAD_CONT59:%.*]] -// CHECK20: omp_offload.failed58: -// CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l174(i32 [[TMP135]], i32 [[TMP137]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] -// CHECK20-NEXT: br label [[OMP_OFFLOAD_CONT59]] -// CHECK20: omp_offload.cont59: -// CHECK20-NEXT: [[TMP173:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 -// CHECK20-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP173]]) +// CHECK20-NEXT: [[TMP82:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP83:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP84:%.*]] = load i32, i32* [[N]], align 4 +// CHECK20-NEXT: store i32 [[TMP84]], i32* [[DOTCAPTURE_EXPR_21]], align 4 +// CHECK20-NEXT: [[TMP85:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_21]], align 4 +// CHECK20-NEXT: [[SUB23:%.*]] = sub nsw i32 [[TMP85]], 0 +// CHECK20-NEXT: [[DIV24:%.*]] = sdiv i32 [[SUB23]], 1 +// CHECK20-NEXT: [[SUB25:%.*]] = sub nsw i32 [[DIV24]], 1 +// CHECK20-NEXT: store i32 [[SUB25]], i32* [[DOTCAPTURE_EXPR_22]], align 4 +// CHECK20-NEXT: [[TMP86:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_22]], align 4 +// CHECK20-NEXT: [[ADD26:%.*]] = add nsw i32 [[TMP86]], 1 +// CHECK20-NEXT: [[TMP87:%.*]] = zext i32 [[ADD26]] to i64 +// CHECK20-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP87]]) +// CHECK20-NEXT: [[TMP88:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l164.region_id, i32 4, i8** [[TMP82]], i8** [[TMP83]], i64* getelementptr inbounds 
([4 x i64], [4 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK20-NEXT: [[TMP89:%.*]] = icmp ne i32 [[TMP88]], 0 +// CHECK20-NEXT: br i1 [[TMP89]], label [[OMP_OFFLOAD_FAILED27:%.*]], label [[OMP_OFFLOAD_CONT28:%.*]] +// CHECK20: omp_offload.failed27: +// CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l164(i32 [[TMP57]], i32 [[TMP59]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] +// CHECK20-NEXT: br label [[OMP_OFFLOAD_CONT28]] +// CHECK20: omp_offload.cont28: +// CHECK20-NEXT: [[TMP90:%.*]] = load i32, i32* [[N]], align 4 +// CHECK20-NEXT: store i32 [[TMP90]], i32* [[N_CASTED29]], align 4 +// CHECK20-NEXT: [[TMP91:%.*]] = load i32, i32* [[N_CASTED29]], align 4 +// CHECK20-NEXT: [[TMP92:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK20-NEXT: [[TMP93:%.*]] = sext i32 [[TMP92]] to i64 +// CHECK20-NEXT: [[TMP94:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS30]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP95:%.*]] = bitcast i8** [[TMP94]] to i32* +// CHECK20-NEXT: store i32 [[TMP91]], i32* [[TMP95]], align 4 +// CHECK20-NEXT: [[TMP96:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS31]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP97:%.*]] = bitcast i8** [[TMP96]] to i32* +// CHECK20-NEXT: store i32 [[TMP91]], i32* [[TMP97]], align 4 +// CHECK20-NEXT: [[TMP98:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS32]], i32 0, i32 0 +// CHECK20-NEXT: store i8* null, i8** [[TMP98]], align 4 +// CHECK20-NEXT: [[TMP99:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS30]], i32 0, i32 1 +// CHECK20-NEXT: [[TMP100:%.*]] = bitcast i8** [[TMP99]] to i32* +// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP100]], align 4 +// CHECK20-NEXT: [[TMP101:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS31]], i32 0, i32 1 +// CHECK20-NEXT: [[TMP102:%.*]] = bitcast i8** [[TMP101]] to i32* +// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP102]], align 4 +// CHECK20-NEXT: [[TMP103:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS32]], i32 0, i32 1 +// CHECK20-NEXT: store i8* null, i8** [[TMP103]], align 4 +// CHECK20-NEXT: [[TMP104:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS30]], i32 0, i32 2 +// CHECK20-NEXT: [[TMP105:%.*]] = bitcast i8** [[TMP104]] to i32** +// CHECK20-NEXT: store i32* [[VLA]], i32** [[TMP105]], align 4 +// CHECK20-NEXT: [[TMP106:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS31]], i32 0, i32 2 +// CHECK20-NEXT: [[TMP107:%.*]] = bitcast i8** [[TMP106]] to i32** +// CHECK20-NEXT: store i32* [[VLA]], i32** [[TMP107]], align 4 +// CHECK20-NEXT: store i64 [[TMP93]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.12, i32 0, i32 2), align 4 +// CHECK20-NEXT: [[TMP108:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS32]], i32 0, i32 2 +// CHECK20-NEXT: store i8* null, i8** [[TMP108]], align 4 +// CHECK20-NEXT: [[TMP109:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS30]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP110:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS31]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP111:%.*]] = load i32, i32* [[N]], align 4 +// CHECK20-NEXT: store i32 [[TMP111]], i32* [[DOTCAPTURE_EXPR_34]], align 4 +// CHECK20-NEXT: [[TMP112:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_34]], align 4 +// 
CHECK20-NEXT: [[SUB36:%.*]] = sub nsw i32 [[TMP112]], 0 +// CHECK20-NEXT: [[DIV37:%.*]] = sdiv i32 [[SUB36]], 1 +// CHECK20-NEXT: [[SUB38:%.*]] = sub nsw i32 [[DIV37]], 1 +// CHECK20-NEXT: store i32 [[SUB38]], i32* [[DOTCAPTURE_EXPR_35]], align 4 +// CHECK20-NEXT: [[TMP113:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_35]], align 4 +// CHECK20-NEXT: [[ADD39:%.*]] = add nsw i32 [[TMP113]], 1 +// CHECK20-NEXT: [[TMP114:%.*]] = zext i32 [[ADD39]] to i64 +// CHECK20-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP114]]) +// CHECK20-NEXT: [[TMP115:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l169.region_id, i32 3, i8** [[TMP109]], i8** [[TMP110]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.12, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.13, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK20-NEXT: [[TMP116:%.*]] = icmp ne i32 [[TMP115]], 0 +// CHECK20-NEXT: br i1 [[TMP116]], label [[OMP_OFFLOAD_FAILED40:%.*]], label [[OMP_OFFLOAD_CONT41:%.*]] +// CHECK20: omp_offload.failed40: +// CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l169(i32 [[TMP91]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] +// CHECK20-NEXT: br label [[OMP_OFFLOAD_CONT41]] +// CHECK20: omp_offload.cont41: +// CHECK20-NEXT: [[TMP117:%.*]] = load i32, i32* [[M]], align 4 +// CHECK20-NEXT: store i32 [[TMP117]], i32* [[M_CASTED42]], align 4 +// CHECK20-NEXT: [[TMP118:%.*]] = load i32, i32* [[M_CASTED42]], align 4 +// CHECK20-NEXT: [[TMP119:%.*]] = load i32, i32* [[N]], align 4 +// CHECK20-NEXT: store i32 [[TMP119]], i32* [[N_CASTED43]], align 4 +// CHECK20-NEXT: [[TMP120:%.*]] = load i32, i32* [[N_CASTED43]], align 4 +// CHECK20-NEXT: [[TMP121:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK20-NEXT: [[TMP122:%.*]] = sext i32 [[TMP121]] to i64 +// CHECK20-NEXT: [[TMP123:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP124:%.*]] = bitcast i8** [[TMP123]] to i32* +// CHECK20-NEXT: store i32 [[TMP118]], i32* [[TMP124]], align 4 +// CHECK20-NEXT: [[TMP125:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP126:%.*]] = bitcast i8** [[TMP125]] to i32* +// CHECK20-NEXT: store i32 [[TMP118]], i32* [[TMP126]], align 4 +// CHECK20-NEXT: [[TMP127:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS46]], i32 0, i32 0 +// CHECK20-NEXT: store i8* null, i8** [[TMP127]], align 4 +// CHECK20-NEXT: [[TMP128:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 1 +// CHECK20-NEXT: [[TMP129:%.*]] = bitcast i8** [[TMP128]] to i32* +// CHECK20-NEXT: store i32 [[TMP120]], i32* [[TMP129]], align 4 +// CHECK20-NEXT: [[TMP130:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 1 +// CHECK20-NEXT: [[TMP131:%.*]] = bitcast i8** [[TMP130]] to i32* +// CHECK20-NEXT: store i32 [[TMP120]], i32* [[TMP131]], align 4 +// CHECK20-NEXT: [[TMP132:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS46]], i32 0, i32 1 +// CHECK20-NEXT: store i8* null, i8** [[TMP132]], align 4 +// CHECK20-NEXT: [[TMP133:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 2 +// CHECK20-NEXT: [[TMP134:%.*]] = bitcast i8** [[TMP133]] to i32* +// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP134]], align 
4 +// CHECK20-NEXT: [[TMP135:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 2 +// CHECK20-NEXT: [[TMP136:%.*]] = bitcast i8** [[TMP135]] to i32* +// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP136]], align 4 +// CHECK20-NEXT: [[TMP137:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS46]], i32 0, i32 2 +// CHECK20-NEXT: store i8* null, i8** [[TMP137]], align 4 +// CHECK20-NEXT: [[TMP138:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 3 +// CHECK20-NEXT: [[TMP139:%.*]] = bitcast i8** [[TMP138]] to i32** +// CHECK20-NEXT: store i32* [[VLA]], i32** [[TMP139]], align 4 +// CHECK20-NEXT: [[TMP140:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 3 +// CHECK20-NEXT: [[TMP141:%.*]] = bitcast i8** [[TMP140]] to i32** +// CHECK20-NEXT: store i32* [[VLA]], i32** [[TMP141]], align 4 +// CHECK20-NEXT: store i64 [[TMP122]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.16, i32 0, i32 3), align 4 +// CHECK20-NEXT: [[TMP142:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS46]], i32 0, i32 3 +// CHECK20-NEXT: store i8* null, i8** [[TMP142]], align 4 +// CHECK20-NEXT: [[TMP143:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS44]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP144:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS45]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP145:%.*]] = load i32, i32* [[N]], align 4 +// CHECK20-NEXT: store i32 [[TMP145]], i32* [[DOTCAPTURE_EXPR_48]], align 4 +// CHECK20-NEXT: [[TMP146:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_48]], align 4 +// CHECK20-NEXT: [[SUB50:%.*]] = sub nsw i32 [[TMP146]], 0 +// CHECK20-NEXT: [[DIV51:%.*]] = sdiv i32 [[SUB50]], 1 +// CHECK20-NEXT: [[SUB52:%.*]] = sub nsw i32 [[DIV51]], 1 +// CHECK20-NEXT: store i32 [[SUB52]], i32* [[DOTCAPTURE_EXPR_49]], align 4 +// CHECK20-NEXT: [[TMP147:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_49]], align 4 +// CHECK20-NEXT: [[ADD53:%.*]] = add nsw i32 [[TMP147]], 1 +// CHECK20-NEXT: [[TMP148:%.*]] = zext i32 [[ADD53]] to i64 +// CHECK20-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 [[TMP148]]) +// CHECK20-NEXT: [[TMP149:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l174.region_id, i32 4, i8** [[TMP143]], i8** [[TMP144]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes.16, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes.17, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK20-NEXT: [[TMP150:%.*]] = icmp ne i32 [[TMP149]], 0 +// CHECK20-NEXT: br i1 [[TMP150]], label [[OMP_OFFLOAD_FAILED54:%.*]], label [[OMP_OFFLOAD_CONT55:%.*]] +// CHECK20: omp_offload.failed54: +// CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l174(i32 [[TMP118]], i32 [[TMP120]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] +// CHECK20-NEXT: br label [[OMP_OFFLOAD_CONT55]] +// CHECK20: omp_offload.cont55: +// CHECK20-NEXT: [[TMP151:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 +// CHECK20-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP151]]) // CHECK20-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK20-NEXT: [[TMP174:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK20-NEXT: call void @llvm.stackrestore(i8* [[TMP174]]) -// CHECK20-NEXT: [[TMP175:%.*]] = load i32, i32* 
[[RETVAL]], align 4 -// CHECK20-NEXT: ret i32 [[TMP175]] +// CHECK20-NEXT: [[TMP152:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK20-NEXT: call void @llvm.stackrestore(i8* [[TMP152]]) +// CHECK20-NEXT: [[TMP153:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK20-NEXT: ret i32 [[TMP153]] // // // CHECK20-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l154 @@ -27976,11 +27664,11 @@ // CHECK20-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 // CHECK20-NEXT: store i32 [[TMP3]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK20-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32, i32*, i32)* @.omp_outlined..5 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP4]]) +// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32, i32*, i32)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP4]]) // CHECK20-NEXT: ret void // // -// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..5 +// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..6 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK20-NEXT: entry: // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -28058,7 +27746,7 @@ // CHECK20-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4, !llvm.access.group !29 // CHECK20-NEXT: store i32 [[TMP19]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !29 // CHECK20-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !29 -// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32, i32*, i32)* @.omp_outlined..6 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], i32 [[TMP1]], i32* [[TMP2]], i32 [[TMP20]]), !llvm.access.group !29 +// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32, i32*, i32)* @.omp_outlined..7 to void (i32*, i32*, ...)*), i32 [[TMP17]], i32 [[TMP18]], i32* [[TMP0]], i32 [[TMP1]], i32* [[TMP2]], i32 [[TMP20]]), !llvm.access.group !29 // CHECK20-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK20: omp.inner.for.inc: // CHECK20-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !29 @@ -28112,7 +27800,7 @@ // CHECK20-NEXT: ret void // // -// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..6 +// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..7 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK20-NEXT: entry: // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -28239,11 +27927,11 @@ // CHECK20-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 4 // CHECK20-NEXT: [[TMP0:%.*]] = load i32, i32* [[VLA_ADDR]], align 4 // CHECK20-NEXT: [[TMP1:%.*]] = load i32*, i32** [[A_ADDR]], align 4 -// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32, i32*)* @.omp_outlined..8 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32 [[TMP0]], i32* [[TMP1]]) +// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32, i32*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32 [[TMP0]], i32* [[TMP1]]) // CHECK20-NEXT: ret void // // -// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..8 +// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..10 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] { // CHECK20-NEXT: entry: // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -28313,7 +28001,7 @@ // CHECK20: omp.inner.for.body: // CHECK20-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !35 // CHECK20-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !35 -// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32, i32*)* @.omp_outlined..9 to void (i32*, i32*, ...)*), i32 [[TMP16]], i32 [[TMP17]], i32* [[TMP0]], i32 [[TMP1]], i32* [[TMP2]]), !llvm.access.group !35 +// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 5, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32, i32*)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32 [[TMP16]], i32 [[TMP17]], i32* [[TMP0]], i32 [[TMP1]], i32* [[TMP2]]), !llvm.access.group !35 // CHECK20-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK20: omp.inner.for.inc: // CHECK20-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !35 @@ -28344,7 +28032,7 @@ // CHECK20-NEXT: ret void // // -// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..9 +// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..11 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]]) #[[ATTR2]] { // CHECK20-NEXT: entry: // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -28474,11 +28162,11 @@ // CHECK20-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 // CHECK20-NEXT: store i32 [[TMP3]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK20-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32, i32*, i32)* @.omp_outlined..11 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP4]]) +// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32, i32*, i32)* @.omp_outlined..14 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP4]]) // CHECK20-NEXT: ret void // // -// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..11 +// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..14 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK20-NEXT: entry: // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -28554,7 +28242,7 @@ // CHECK20-NEXT: [[TMP18:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4, !llvm.access.group !41 // CHECK20-NEXT: store i32 [[TMP18]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !41 // CHECK20-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !41 -// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32, i32*, i32)* @.omp_outlined..12 to void (i32*, i32*, ...)*), i32 [[TMP16]], i32 [[TMP17]], i32* [[TMP0]], i32 [[TMP1]], i32* [[TMP2]], i32 [[TMP19]]), !llvm.access.group !41 +// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 6, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, i32*, i32, i32*, i32)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i32 [[TMP16]], i32 [[TMP17]], i32* [[TMP0]], i32 [[TMP1]], i32* [[TMP2]], i32 [[TMP19]]), !llvm.access.group !41 // CHECK20-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK20: omp.inner.for.inc: // CHECK20-NEXT: [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !41 @@ -28585,7 +28273,7 @@ // CHECK20-NEXT: ret void // // -// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..12 +// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..15 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK20-NEXT: entry: // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -28739,7 +28427,7 @@ // CHECK20-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK20-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK20-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK20-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l122.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.16, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK20-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l122.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.20, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.21, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK20-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK20-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK20: omp_offload.failed: @@ -28757,7 +28445,7 @@ // CHECK20-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0 // CHECK20-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0 // CHECK20-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK20-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l127.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.19, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.20, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK20-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l127.region_id, i32 1, i8** [[TMP14]], i8** 
[[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.24, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.25, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK20-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 // CHECK20-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]] // CHECK20: omp_offload.failed5: @@ -28786,7 +28474,7 @@ // CHECK20-NEXT: [[TMP30:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0 // CHECK20-NEXT: [[TMP31:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0 // CHECK20-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK20-NEXT: [[TMP32:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l132.region_id, i32 2, i8** [[TMP30]], i8** [[TMP31]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.23, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.24, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK20-NEXT: [[TMP32:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l132.region_id, i32 2, i8** [[TMP30]], i8** [[TMP31]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.28, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.29, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK20-NEXT: [[TMP33:%.*]] = icmp ne i32 [[TMP32]], 0 // CHECK20-NEXT: br i1 [[TMP33]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]] // CHECK20: omp_offload.failed11: @@ -28804,7 +28492,7 @@ // CHECK20-NEXT: [[TMP39:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS13]], i32 0, i32 0 // CHECK20-NEXT: [[TMP40:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS14]], i32 0, i32 0 // CHECK20-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK20-NEXT: [[TMP41:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l137.region_id, i32 1, i8** [[TMP39]], i8** [[TMP40]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.27, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.28, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK20-NEXT: [[TMP41:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l137.region_id, i32 1, i8** [[TMP39]], i8** [[TMP40]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.32, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.33, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK20-NEXT: [[TMP42:%.*]] = icmp ne i32 [[TMP41]], 0 // CHECK20-NEXT: br i1 [[TMP42]], label [[OMP_OFFLOAD_FAILED17:%.*]], label [[OMP_OFFLOAD_CONT18:%.*]] // CHECK20: omp_offload.failed17: @@ -28833,7 +28521,7 @@ // CHECK20-NEXT: [[TMP55:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0 // CHECK20-NEXT: [[TMP56:%.*]] = getelementptr inbounds [2 x i8*], [2 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0 // 
CHECK20-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i64 10) -// CHECK20-NEXT: [[TMP57:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l142.region_id, i32 2, i8** [[TMP55]], i8** [[TMP56]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.31, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.32, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK20-NEXT: [[TMP57:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB3]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l142.region_id, i32 2, i8** [[TMP55]], i8** [[TMP56]], i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_sizes.36, i32 0, i32 0), i64* getelementptr inbounds ([2 x i64], [2 x i64]* @.offload_maptypes.37, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) // CHECK20-NEXT: [[TMP58:%.*]] = icmp ne i32 [[TMP57]], 0 // CHECK20-NEXT: br i1 [[TMP58]], label [[OMP_OFFLOAD_FAILED24:%.*]], label [[OMP_OFFLOAD_CONT25:%.*]] // CHECK20: omp_offload.failed24: @@ -28849,11 +28537,11 @@ // CHECK20-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK20-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 // CHECK20-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 -// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..14 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK20-NEXT: ret void // // -// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..14 +// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..18 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK20-NEXT: entry: // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -28899,7 +28587,7 @@ // CHECK20: omp.inner.for.body: // CHECK20-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !47 // CHECK20-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !47 -// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..15 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]), !llvm.access.group !47 +// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..19 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]), !llvm.access.group !47 // CHECK20-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK20: omp.inner.for.inc: // CHECK20-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !47 @@ -28921,7 +28609,7 @@ // CHECK20-NEXT: ret void // // -// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..15 +// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..19 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK20-NEXT: entry: // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -29008,11 +28696,11 @@ // CHECK20-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK20-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 // CHECK20-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 -// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..17 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..22 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK20-NEXT: ret void // // -// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..17 +// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..22 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK20-NEXT: entry: // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -29058,7 +28746,7 @@ // CHECK20: omp.inner.for.body: // CHECK20-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !53 // CHECK20-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !53 -// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..18 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]), !llvm.access.group !53 +// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..23 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]), !llvm.access.group !53 // CHECK20-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK20: omp.inner.for.inc: // CHECK20-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !53 @@ -29080,7 +28768,7 @@ // CHECK20-NEXT: ret void // // -// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..18 +// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..23 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK20-NEXT: entry: // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -29176,11 +28864,11 @@ // CHECK20-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 // CHECK20-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK20-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..21 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP3]]) +// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..26 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP3]]) // CHECK20-NEXT: ret void // // -// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..21 +// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..26 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK20-NEXT: entry: // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -29232,7 +28920,7 @@ // CHECK20-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4, !llvm.access.group !59 // CHECK20-NEXT: store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !59 // CHECK20-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !59 -// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..22 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]]), !llvm.access.group !59 +// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..27 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]]), !llvm.access.group !59 // CHECK20-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK20: omp.inner.for.inc: // CHECK20-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !59 @@ -29254,7 +28942,7 @@ // CHECK20-NEXT: ret void // // -// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..22 +// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..27 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK20-NEXT: entry: // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -29363,11 +29051,11 @@ // CHECK20-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK20-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 // CHECK20-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 -// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..25 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..30 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK20-NEXT: ret void // // -// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..25 +// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..30 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK20-NEXT: entry: // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -29413,7 +29101,7 @@ // CHECK20: omp.inner.for.body: // CHECK20-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_COMB_LB]], align 4, !llvm.access.group !65 // CHECK20-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_COMB_UB]], align 4, !llvm.access.group !65 -// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..26 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]), !llvm.access.group !65 +// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*)* @.omp_outlined..31 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]]), !llvm.access.group !65 // CHECK20-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK20: omp.inner.for.inc: // CHECK20-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !65 @@ -29435,7 +29123,7 @@ // CHECK20-NEXT: ret void // // -// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..26 +// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..31 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK20-NEXT: entry: // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -29529,11 +29217,11 @@ // CHECK20-NEXT: [[TMP2:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 // CHECK20-NEXT: store i32 [[TMP2]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK20-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..29 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP3]]) +// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*, i32)* @.omp_outlined..34 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]], i32 [[TMP3]]) // CHECK20-NEXT: ret void // // -// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..29 +// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..34 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK20-NEXT: entry: // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -29585,7 +29273,7 @@ // CHECK20-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__ADDR]], align 4, !llvm.access.group !71 // CHECK20-NEXT: store i32 [[TMP10]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !71 // CHECK20-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4, !llvm.access.group !71 -// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..30 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]]), !llvm.access.group !71 +// CHECK20-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32, i32, [10 x i32]*, i32)* @.omp_outlined..35 to void (i32*, i32*, ...)*), i32 [[TMP8]], i32 [[TMP9]], [10 x i32]* [[TMP0]], i32 [[TMP11]]), !llvm.access.group !71 // CHECK20-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] // CHECK20: omp.inner.for.inc: // CHECK20-NEXT: [[TMP12:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group !71 @@ -29607,7 +29295,7 @@ // CHECK20-NEXT: ret void // // -// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..30 +// CHECK20-LABEL: define {{[^@]+}}@.omp_outlined..35 // CHECK20-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32 noundef [[DOTPREVIOUS_LB_:%.*]], i32 noundef [[DOTPREVIOUS_UB_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK20-NEXT: entry: // CHECK20-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 diff --git a/clang/test/OpenMP/teams_distribute_simd_codegen.cpp b/clang/test/OpenMP/teams_distribute_simd_codegen.cpp --- a/clang/test/OpenMP/teams_distribute_simd_codegen.cpp +++ b/clang/test/OpenMP/teams_distribute_simd_codegen.cpp @@ -2369,7 +2369,6 @@ // CHECK9-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK9-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK9-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8 -// CHECK9-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 8 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -2391,56 +2390,50 @@ // CHECK9-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK9-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i64* // CHECK9-NEXT: store i64 [[TMP4]], i64* [[TMP9]], align 8 -// CHECK9-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK9-NEXT: store i64 4, i64* [[TMP10]], align 8 -// CHECK9-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK9-NEXT: store i8* null, i8** [[TMP11]], align 8 -// CHECK9-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK9-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64* -// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP13]], align 8 -// CHECK9-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK9-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64* -// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP15]], align 8 -// CHECK9-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK9-NEXT: store i64 8, i64* [[TMP16]], align 8 -// CHECK9-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK9-NEXT: store i8* null, i8** [[TMP17]], align 8 -// CHECK9-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK9-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK9-NEXT: store i8* null, i8** [[TMP10]], align 8 +// CHECK9-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK9-NEXT: 
[[TMP12:%.*]] = bitcast i8** [[TMP11]] to i64* +// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP12]], align 8 +// CHECK9-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK9-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i64* +// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP14]], align 8 +// CHECK9-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK9-NEXT: store i8* null, i8** [[TMP15]], align 8 +// CHECK9-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK9-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** +// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 8 +// CHECK9-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK9-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** // CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 8 -// CHECK9-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK9-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** -// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 8 -// CHECK9-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK9-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 8 -// CHECK9-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK9-NEXT: store i8* null, i8** [[TMP23]], align 8 -// CHECK9-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4 -// CHECK9-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK9-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 +// CHECK9-NEXT: store i64 [[TMP5]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 2), align 8 +// CHECK9-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK9-NEXT: store i8* null, i8** [[TMP20]], align 8 +// CHECK9-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP23:%.*]] = load i32, i32* [[N]], align 4 +// CHECK9-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK9-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK9-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK9-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK9-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK9-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1 -// CHECK9-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 -// CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[TMP30]]) -// CHECK9-NEXT: [[TMP31:%.*]] = call i32 
@__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z15teams_local_argv_l75.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) -// CHECK9-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 -// CHECK9-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK9-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], 1 +// CHECK9-NEXT: [[TMP26:%.*]] = zext i32 [[ADD]] to i64 +// CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[TMP26]]) +// CHECK9-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z15teams_local_argv_l75.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK9-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK9-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK9: omp_offload.failed: // CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z15teams_local_argv_l75(i64 [[TMP4]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK9: omp_offload.cont: // CHECK9-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[VLA]], i64 0 -// CHECK9-NEXT: [[TMP33:%.*]] = load i32, i32* [[ARRAYIDX]], align 4 -// CHECK9-NEXT: [[TMP34:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK9-NEXT: call void @llvm.stackrestore(i8* [[TMP34]]) -// CHECK9-NEXT: ret i32 [[TMP33]] +// CHECK9-NEXT: [[TMP29:%.*]] = load i32, i32* [[ARRAYIDX]], align 4 +// CHECK9-NEXT: [[TMP30:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK9-NEXT: call void @llvm.stackrestore(i8* [[TMP30]]) +// CHECK9-NEXT: ret i32 [[TMP29]] // // // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z15teams_local_argv_l75 @@ -2583,7 +2576,6 @@ // CHECK10-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK10-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK10-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8 -// CHECK10-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 8 // CHECK10-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK10-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK10-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -2605,56 +2597,50 @@ // CHECK10-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK10-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i64* // CHECK10-NEXT: store i64 [[TMP4]], i64* [[TMP9]], align 8 -// CHECK10-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK10-NEXT: store i64 4, i64* [[TMP10]], align 8 -// CHECK10-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK10-NEXT: store i8* null, i8** [[TMP11]], align 8 -// CHECK10-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK10-NEXT: [[TMP13:%.*]] = 
bitcast i8** [[TMP12]] to i64* -// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP13]], align 8 -// CHECK10-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK10-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64* -// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP15]], align 8 -// CHECK10-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK10-NEXT: store i64 8, i64* [[TMP16]], align 8 -// CHECK10-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK10-NEXT: store i8* null, i8** [[TMP17]], align 8 -// CHECK10-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK10-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK10-NEXT: store i8* null, i8** [[TMP10]], align 8 +// CHECK10-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK10-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i64* +// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP12]], align 8 +// CHECK10-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK10-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i64* +// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP14]], align 8 +// CHECK10-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK10-NEXT: store i8* null, i8** [[TMP15]], align 8 +// CHECK10-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK10-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** +// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 8 +// CHECK10-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK10-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** // CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 8 -// CHECK10-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK10-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** -// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 8 -// CHECK10-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK10-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 8 -// CHECK10-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK10-NEXT: store i8* null, i8** [[TMP23]], align 8 -// CHECK10-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4 -// CHECK10-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK10-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK10-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 +// CHECK10-NEXT: store i64 [[TMP5]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 2), align 8 +// CHECK10-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], 
i64 0, i64 2 +// CHECK10-NEXT: store i8* null, i8** [[TMP20]], align 8 +// CHECK10-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP23:%.*]] = load i32, i32* [[N]], align 4 +// CHECK10-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK10-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK10-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK10-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK10-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK10-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK10-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK10-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1 -// CHECK10-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 -// CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[TMP30]]) -// CHECK10-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z15teams_local_argv_l75.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) -// CHECK10-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 -// CHECK10-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK10-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK10-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], 1 +// CHECK10-NEXT: [[TMP26:%.*]] = zext i32 [[ADD]] to i64 +// CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[TMP26]]) +// CHECK10-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z15teams_local_argv_l75.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK10-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK10-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK10: omp_offload.failed: // CHECK10-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z15teams_local_argv_l75(i64 [[TMP4]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK10-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK10: omp_offload.cont: // CHECK10-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[VLA]], i64 0 -// CHECK10-NEXT: [[TMP33:%.*]] = load i32, i32* [[ARRAYIDX]], align 4 -// CHECK10-NEXT: [[TMP34:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK10-NEXT: call void @llvm.stackrestore(i8* [[TMP34]]) -// CHECK10-NEXT: ret i32 [[TMP33]] +// CHECK10-NEXT: [[TMP29:%.*]] = load i32, i32* [[ARRAYIDX]], align 4 +// CHECK10-NEXT: [[TMP30:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK10-NEXT: call void @llvm.stackrestore(i8* [[TMP30]]) +// CHECK10-NEXT: ret i32 [[TMP29]] // // // CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z15teams_local_argv_l75 @@ -2797,7 +2783,6 @@ // CHECK11-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4 // 
CHECK11-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK11-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4 -// CHECK11-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 4 // CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -2818,56 +2803,50 @@ // CHECK11-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK11-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i32* // CHECK11-NEXT: store i32 [[TMP3]], i32* [[TMP9]], align 4 -// CHECK11-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK11-NEXT: store i64 4, i64* [[TMP10]], align 4 -// CHECK11-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK11-NEXT: store i8* null, i8** [[TMP11]], align 4 -// CHECK11-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK11-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32* -// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP13]], align 4 -// CHECK11-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK11-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32* -// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP15]], align 4 -// CHECK11-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK11-NEXT: store i64 4, i64* [[TMP16]], align 4 -// CHECK11-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK11-NEXT: store i8* null, i8** [[TMP17]], align 4 -// CHECK11-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK11-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK11-NEXT: store i8* null, i8** [[TMP10]], align 4 +// CHECK11-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK11-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i32* +// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP12]], align 4 +// CHECK11-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK11-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i32* +// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP14]], align 4 +// CHECK11-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK11-NEXT: store i8* null, i8** [[TMP15]], align 4 +// CHECK11-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK11-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** +// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 4 +// CHECK11-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK11-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** // CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 4 -// CHECK11-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK11-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** -// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 4 -// CHECK11-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x 
i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK11-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 4 -// CHECK11-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK11-NEXT: store i8* null, i8** [[TMP23]], align 4 -// CHECK11-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4 -// CHECK11-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK11-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 +// CHECK11-NEXT: store i64 [[TMP5]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 2), align 4 +// CHECK11-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK11-NEXT: store i8* null, i8** [[TMP20]], align 4 +// CHECK11-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP23:%.*]] = load i32, i32* [[N]], align 4 +// CHECK11-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK11-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK11-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK11-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK11-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK11-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1 -// CHECK11-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 -// CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[TMP30]]) -// CHECK11-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z15teams_local_argv_l75.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) -// CHECK11-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 -// CHECK11-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK11-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], 1 +// CHECK11-NEXT: [[TMP26:%.*]] = zext i32 [[ADD]] to i64 +// CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[TMP26]]) +// CHECK11-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z15teams_local_argv_l75.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK11-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// 
CHECK11-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK11: omp_offload.failed: // CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z15teams_local_argv_l75(i32 [[TMP3]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK11: omp_offload.cont: // CHECK11-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[VLA]], i32 0 -// CHECK11-NEXT: [[TMP33:%.*]] = load i32, i32* [[ARRAYIDX]], align 4 -// CHECK11-NEXT: [[TMP34:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK11-NEXT: call void @llvm.stackrestore(i8* [[TMP34]]) -// CHECK11-NEXT: ret i32 [[TMP33]] +// CHECK11-NEXT: [[TMP29:%.*]] = load i32, i32* [[ARRAYIDX]], align 4 +// CHECK11-NEXT: [[TMP30:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK11-NEXT: call void @llvm.stackrestore(i8* [[TMP30]]) +// CHECK11-NEXT: ret i32 [[TMP29]] // // // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z15teams_local_argv_l75 @@ -3008,7 +2987,6 @@ // CHECK12-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK12-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK12-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4 -// CHECK12-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 4 // CHECK12-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK12-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK12-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -3029,56 +3007,50 @@ // CHECK12-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK12-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i32* // CHECK12-NEXT: store i32 [[TMP3]], i32* [[TMP9]], align 4 -// CHECK12-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK12-NEXT: store i64 4, i64* [[TMP10]], align 4 -// CHECK12-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK12-NEXT: store i8* null, i8** [[TMP11]], align 4 -// CHECK12-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK12-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32* -// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP13]], align 4 -// CHECK12-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK12-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32* -// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP15]], align 4 -// CHECK12-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK12-NEXT: store i64 4, i64* [[TMP16]], align 4 -// CHECK12-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK12-NEXT: store i8* null, i8** [[TMP17]], align 4 -// CHECK12-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK12-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK12-NEXT: store i8* null, i8** [[TMP10]], align 4 +// CHECK12-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK12-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i32* +// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP12]], align 4 +// CHECK12-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], 
[3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK12-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i32* +// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP14]], align 4 +// CHECK12-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK12-NEXT: store i8* null, i8** [[TMP15]], align 4 +// CHECK12-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK12-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** +// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 4 +// CHECK12-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK12-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** // CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 4 -// CHECK12-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK12-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** -// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 4 -// CHECK12-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK12-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 4 -// CHECK12-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK12-NEXT: store i8* null, i8** [[TMP23]], align 4 -// CHECK12-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4 -// CHECK12-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK12-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK12-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 +// CHECK12-NEXT: store i64 [[TMP5]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 2), align 4 +// CHECK12-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK12-NEXT: store i8* null, i8** [[TMP20]], align 4 +// CHECK12-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP23:%.*]] = load i32, i32* [[N]], align 4 +// CHECK12-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK12-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK12-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK12-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK12-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK12-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK12-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK12-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1 -// CHECK12-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 -// CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[TMP30]]) -// CHECK12-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z15teams_local_argv_l75.region_id, i32 3, i8** 
[[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) -// CHECK12-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 -// CHECK12-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK12-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK12-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], 1 +// CHECK12-NEXT: [[TMP26:%.*]] = zext i32 [[ADD]] to i64 +// CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[TMP26]]) +// CHECK12-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z15teams_local_argv_l75.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK12-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK12-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK12: omp_offload.failed: // CHECK12-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z15teams_local_argv_l75(i32 [[TMP3]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK12-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK12: omp_offload.cont: // CHECK12-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[VLA]], i32 0 -// CHECK12-NEXT: [[TMP33:%.*]] = load i32, i32* [[ARRAYIDX]], align 4 -// CHECK12-NEXT: [[TMP34:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK12-NEXT: call void @llvm.stackrestore(i8* [[TMP34]]) -// CHECK12-NEXT: ret i32 [[TMP33]] +// CHECK12-NEXT: [[TMP29:%.*]] = load i32, i32* [[ARRAYIDX]], align 4 +// CHECK12-NEXT: [[TMP30:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK12-NEXT: call void @llvm.stackrestore(i8* [[TMP30]]) +// CHECK12-NEXT: ret i32 [[TMP29]] // // // CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z15teams_local_argv_l75 @@ -3528,7 +3500,6 @@ // CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK17-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK17-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8 -// CHECK17-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 8 // CHECK17-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK17-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8 // CHECK17-NEXT: [[THIS1:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8 @@ -3547,45 +3518,39 @@ // CHECK17-NEXT: [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK17-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to [123 x i32]** // CHECK17-NEXT: store [123 x i32]* [[A]], [123 x i32]** [[TMP10]], align 8 -// CHECK17-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK17-NEXT: store i64 [[TMP6]], i64* [[TMP11]], align 8 -// CHECK17-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK17-NEXT: store i8* null, i8** [[TMP12]], align 8 -// CHECK17-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK17-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to %struct.SS** -// CHECK17-NEXT: store 
%struct.SS* [[THIS1]], %struct.SS** [[TMP14]], align 8 -// CHECK17-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK17-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to float** -// CHECK17-NEXT: store float* [[B]], float** [[TMP16]], align 8 -// CHECK17-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK17-NEXT: store i64 4, i64* [[TMP17]], align 8 -// CHECK17-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK17-NEXT: store i8* null, i8** [[TMP18]], align 8 -// CHECK17-NEXT: [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 -// CHECK17-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to %struct.SS** -// CHECK17-NEXT: store %struct.SS* [[THIS1]], %struct.SS** [[TMP20]], align 8 -// CHECK17-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK17-NEXT: [[TMP22:%.*]] = bitcast i8** [[TMP21]] to [123 x i32]** -// CHECK17-NEXT: store [123 x i32]* [[A]], [123 x i32]** [[TMP22]], align 8 -// CHECK17-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK17-NEXT: store i64 492, i64* [[TMP23]], align 8 -// CHECK17-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK17-NEXT: store i8* null, i8** [[TMP24]], align 8 -// CHECK17-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP27:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 +// CHECK17-NEXT: store i64 [[TMP6]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), align 8 +// CHECK17-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK17-NEXT: store i8* null, i8** [[TMP11]], align 8 +// CHECK17-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK17-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to %struct.SS** +// CHECK17-NEXT: store %struct.SS* [[THIS1]], %struct.SS** [[TMP13]], align 8 +// CHECK17-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK17-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to float** +// CHECK17-NEXT: store float* [[B]], float** [[TMP15]], align 8 +// CHECK17-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK17-NEXT: store i8* null, i8** [[TMP16]], align 8 +// CHECK17-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK17-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to %struct.SS** +// CHECK17-NEXT: store %struct.SS* [[THIS1]], %struct.SS** [[TMP18]], align 8 +// CHECK17-NEXT: [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 +// CHECK17-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to [123 x i32]** +// CHECK17-NEXT: store [123 x i32]* [[A]], [123 x i32]** [[TMP20]], align 8 +// CHECK17-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK17-NEXT: store i8* null, i8** 
[[TMP21]], align 8 +// CHECK17-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK17-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 123) -// CHECK17-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l123.region_id, i32 3, i8** [[TMP25]], i8** [[TMP26]], i64* [[TMP27]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) -// CHECK17-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0 -// CHECK17-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK17-NEXT: [[TMP24:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l123.region_id, i32 3, i8** [[TMP22]], i8** [[TMP23]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK17-NEXT: [[TMP25:%.*]] = icmp ne i32 [[TMP24]], 0 +// CHECK17-NEXT: br i1 [[TMP25]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK17: omp_offload.failed: // CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l123(%struct.SS* [[THIS1]]) #[[ATTR2:[0-9]+]] // CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK17: omp_offload.cont: // CHECK17-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0 // CHECK17-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A2]], i64 0, i64 0 -// CHECK17-NEXT: [[TMP30:%.*]] = load i32, i32* [[ARRAYIDX]], align 4 -// CHECK17-NEXT: ret i32 [[TMP30]] +// CHECK17-NEXT: [[TMP26:%.*]] = load i32, i32* [[ARRAYIDX]], align 4 +// CHECK17-NEXT: ret i32 [[TMP26]] // // // CHECK17-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l123 @@ -3698,7 +3663,6 @@ // CHECK18-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK18-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK18-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8 -// CHECK18-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 8 // CHECK18-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK18-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8 // CHECK18-NEXT: [[THIS1:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8 @@ -3717,45 +3681,39 @@ // CHECK18-NEXT: [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK18-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to [123 x i32]** // CHECK18-NEXT: store [123 x i32]* [[A]], [123 x i32]** [[TMP10]], align 8 -// CHECK18-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK18-NEXT: store i64 [[TMP6]], i64* [[TMP11]], align 8 -// CHECK18-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK18-NEXT: store i8* null, i8** [[TMP12]], align 8 -// CHECK18-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], 
i32 0, i32 1 -// CHECK18-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to %struct.SS** -// CHECK18-NEXT: store %struct.SS* [[THIS1]], %struct.SS** [[TMP14]], align 8 -// CHECK18-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK18-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to float** -// CHECK18-NEXT: store float* [[B]], float** [[TMP16]], align 8 -// CHECK18-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK18-NEXT: store i64 4, i64* [[TMP17]], align 8 -// CHECK18-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK18-NEXT: store i8* null, i8** [[TMP18]], align 8 -// CHECK18-NEXT: [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 -// CHECK18-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to %struct.SS** -// CHECK18-NEXT: store %struct.SS* [[THIS1]], %struct.SS** [[TMP20]], align 8 -// CHECK18-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK18-NEXT: [[TMP22:%.*]] = bitcast i8** [[TMP21]] to [123 x i32]** -// CHECK18-NEXT: store [123 x i32]* [[A]], [123 x i32]** [[TMP22]], align 8 -// CHECK18-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK18-NEXT: store i64 492, i64* [[TMP23]], align 8 -// CHECK18-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK18-NEXT: store i8* null, i8** [[TMP24]], align 8 -// CHECK18-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP27:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 +// CHECK18-NEXT: store i64 [[TMP6]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), align 8 +// CHECK18-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK18-NEXT: store i8* null, i8** [[TMP11]], align 8 +// CHECK18-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK18-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to %struct.SS** +// CHECK18-NEXT: store %struct.SS* [[THIS1]], %struct.SS** [[TMP13]], align 8 +// CHECK18-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK18-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to float** +// CHECK18-NEXT: store float* [[B]], float** [[TMP15]], align 8 +// CHECK18-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK18-NEXT: store i8* null, i8** [[TMP16]], align 8 +// CHECK18-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK18-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to %struct.SS** +// CHECK18-NEXT: store %struct.SS* [[THIS1]], %struct.SS** [[TMP18]], align 8 +// CHECK18-NEXT: [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 +// CHECK18-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to [123 x i32]** +// CHECK18-NEXT: store [123 x i32]* [[A]], [123 x i32]** [[TMP20]], align 8 +// CHECK18-NEXT: [[TMP21:%.*]] = getelementptr 
inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK18-NEXT: store i8* null, i8** [[TMP21]], align 8 +// CHECK18-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK18-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 123) -// CHECK18-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l123.region_id, i32 3, i8** [[TMP25]], i8** [[TMP26]], i64* [[TMP27]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) -// CHECK18-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0 -// CHECK18-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK18-NEXT: [[TMP24:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l123.region_id, i32 3, i8** [[TMP22]], i8** [[TMP23]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK18-NEXT: [[TMP25:%.*]] = icmp ne i32 [[TMP24]], 0 +// CHECK18-NEXT: br i1 [[TMP25]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK18: omp_offload.failed: // CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l123(%struct.SS* [[THIS1]]) #[[ATTR2:[0-9]+]] // CHECK18-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK18: omp_offload.cont: // CHECK18-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0 // CHECK18-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A2]], i64 0, i64 0 -// CHECK18-NEXT: [[TMP30:%.*]] = load i32, i32* [[ARRAYIDX]], align 4 -// CHECK18-NEXT: ret i32 [[TMP30]] +// CHECK18-NEXT: [[TMP26:%.*]] = load i32, i32* [[ARRAYIDX]], align 4 +// CHECK18-NEXT: ret i32 [[TMP26]] // // // CHECK18-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l123 @@ -3868,7 +3826,6 @@ // CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK19-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK19-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4 -// CHECK19-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 4 // CHECK19-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK19-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4 // CHECK19-NEXT: [[THIS1:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4 @@ -3887,45 +3844,39 @@ // CHECK19-NEXT: [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK19-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to [123 x i32]** // CHECK19-NEXT: store [123 x i32]* [[A]], [123 x i32]** [[TMP10]], align 4 -// CHECK19-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK19-NEXT: store i64 [[TMP6]], i64* [[TMP11]], align 4 -// CHECK19-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK19-NEXT: store i8* null, i8** [[TMP12]], 
align 4 -// CHECK19-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK19-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to %struct.SS** -// CHECK19-NEXT: store %struct.SS* [[THIS1]], %struct.SS** [[TMP14]], align 4 -// CHECK19-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK19-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to float** -// CHECK19-NEXT: store float* [[B]], float** [[TMP16]], align 4 -// CHECK19-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK19-NEXT: store i64 4, i64* [[TMP17]], align 4 -// CHECK19-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK19-NEXT: store i8* null, i8** [[TMP18]], align 4 -// CHECK19-NEXT: [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 -// CHECK19-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to %struct.SS** -// CHECK19-NEXT: store %struct.SS* [[THIS1]], %struct.SS** [[TMP20]], align 4 -// CHECK19-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK19-NEXT: [[TMP22:%.*]] = bitcast i8** [[TMP21]] to [123 x i32]** -// CHECK19-NEXT: store [123 x i32]* [[A]], [123 x i32]** [[TMP22]], align 4 -// CHECK19-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK19-NEXT: store i64 492, i64* [[TMP23]], align 4 -// CHECK19-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK19-NEXT: store i8* null, i8** [[TMP24]], align 4 -// CHECK19-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP27:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 +// CHECK19-NEXT: store i64 [[TMP6]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), align 4 +// CHECK19-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK19-NEXT: store i8* null, i8** [[TMP11]], align 4 +// CHECK19-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK19-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to %struct.SS** +// CHECK19-NEXT: store %struct.SS* [[THIS1]], %struct.SS** [[TMP13]], align 4 +// CHECK19-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK19-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to float** +// CHECK19-NEXT: store float* [[B]], float** [[TMP15]], align 4 +// CHECK19-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK19-NEXT: store i8* null, i8** [[TMP16]], align 4 +// CHECK19-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK19-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to %struct.SS** +// CHECK19-NEXT: store %struct.SS* [[THIS1]], %struct.SS** [[TMP18]], align 4 +// CHECK19-NEXT: [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 +// CHECK19-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to [123 x i32]** +// CHECK19-NEXT: 
store [123 x i32]* [[A]], [123 x i32]** [[TMP20]], align 4 +// CHECK19-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK19-NEXT: store i8* null, i8** [[TMP21]], align 4 +// CHECK19-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK19-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK19-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 123) -// CHECK19-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l123.region_id, i32 3, i8** [[TMP25]], i8** [[TMP26]], i64* [[TMP27]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) -// CHECK19-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0 -// CHECK19-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK19-NEXT: [[TMP24:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l123.region_id, i32 3, i8** [[TMP22]], i8** [[TMP23]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK19-NEXT: [[TMP25:%.*]] = icmp ne i32 [[TMP24]], 0 +// CHECK19-NEXT: br i1 [[TMP25]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK19: omp_offload.failed: // CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l123(%struct.SS* [[THIS1]]) #[[ATTR2:[0-9]+]] // CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK19: omp_offload.cont: // CHECK19-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0 // CHECK19-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A2]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP30:%.*]] = load i32, i32* [[ARRAYIDX]], align 4 -// CHECK19-NEXT: ret i32 [[TMP30]] +// CHECK19-NEXT: [[TMP26:%.*]] = load i32, i32* [[ARRAYIDX]], align 4 +// CHECK19-NEXT: ret i32 [[TMP26]] // // // CHECK19-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l123 @@ -4037,7 +3988,6 @@ // CHECK20-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK20-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK20-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4 -// CHECK20-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 4 // CHECK20-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK20-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4 // CHECK20-NEXT: [[THIS1:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4 @@ -4056,45 +4006,39 @@ // CHECK20-NEXT: [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK20-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to [123 x i32]** // CHECK20-NEXT: store [123 x i32]* [[A]], [123 x i32]** [[TMP10]], align 4 -// CHECK20-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK20-NEXT: store i64 [[TMP6]], i64* [[TMP11]], align 4 -// CHECK20-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x 
i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK20-NEXT: store i8* null, i8** [[TMP12]], align 4 -// CHECK20-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK20-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to %struct.SS** -// CHECK20-NEXT: store %struct.SS* [[THIS1]], %struct.SS** [[TMP14]], align 4 -// CHECK20-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK20-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to float** -// CHECK20-NEXT: store float* [[B]], float** [[TMP16]], align 4 -// CHECK20-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK20-NEXT: store i64 4, i64* [[TMP17]], align 4 -// CHECK20-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK20-NEXT: store i8* null, i8** [[TMP18]], align 4 -// CHECK20-NEXT: [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 -// CHECK20-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to %struct.SS** -// CHECK20-NEXT: store %struct.SS* [[THIS1]], %struct.SS** [[TMP20]], align 4 -// CHECK20-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK20-NEXT: [[TMP22:%.*]] = bitcast i8** [[TMP21]] to [123 x i32]** -// CHECK20-NEXT: store [123 x i32]* [[A]], [123 x i32]** [[TMP22]], align 4 -// CHECK20-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK20-NEXT: store i64 492, i64* [[TMP23]], align 4 -// CHECK20-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK20-NEXT: store i8* null, i8** [[TMP24]], align 4 -// CHECK20-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP27:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 +// CHECK20-NEXT: store i64 [[TMP6]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), align 4 +// CHECK20-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK20-NEXT: store i8* null, i8** [[TMP11]], align 4 +// CHECK20-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK20-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to %struct.SS** +// CHECK20-NEXT: store %struct.SS* [[THIS1]], %struct.SS** [[TMP13]], align 4 +// CHECK20-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK20-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to float** +// CHECK20-NEXT: store float* [[B]], float** [[TMP15]], align 4 +// CHECK20-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK20-NEXT: store i8* null, i8** [[TMP16]], align 4 +// CHECK20-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK20-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to %struct.SS** +// CHECK20-NEXT: store %struct.SS* [[THIS1]], %struct.SS** [[TMP18]], align 4 +// CHECK20-NEXT: [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], 
i32 0, i32 2 +// CHECK20-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to [123 x i32]** +// CHECK20-NEXT: store [123 x i32]* [[A]], [123 x i32]** [[TMP20]], align 4 +// CHECK20-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK20-NEXT: store i8* null, i8** [[TMP21]], align 4 +// CHECK20-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK20-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK20-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 123) -// CHECK20-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l123.region_id, i32 3, i8** [[TMP25]], i8** [[TMP26]], i64* [[TMP27]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) -// CHECK20-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0 -// CHECK20-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK20-NEXT: [[TMP24:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l123.region_id, i32 3, i8** [[TMP22]], i8** [[TMP23]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK20-NEXT: [[TMP25:%.*]] = icmp ne i32 [[TMP24]], 0 +// CHECK20-NEXT: br i1 [[TMP25]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK20: omp_offload.failed: // CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l123(%struct.SS* [[THIS1]]) #[[ATTR2:[0-9]+]] // CHECK20-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK20: omp_offload.cont: // CHECK20-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0 // CHECK20-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A2]], i32 0, i32 0 -// CHECK20-NEXT: [[TMP30:%.*]] = load i32, i32* [[ARRAYIDX]], align 4 -// CHECK20-NEXT: ret i32 [[TMP30]] +// CHECK20-NEXT: [[TMP26:%.*]] = load i32, i32* [[ARRAYIDX]], align 4 +// CHECK20-NEXT: ret i32 [[TMP26]] // // // CHECK20-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l123 @@ -4206,7 +4150,6 @@ // CHECK21-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK21-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK21-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8 -// CHECK21-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 8 // CHECK21-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK21-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8 // CHECK21-NEXT: [[THIS1:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8 @@ -4225,45 +4168,39 @@ // CHECK21-NEXT: [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK21-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to [123 x i32]** // CHECK21-NEXT: store [123 x i32]* [[A]], [123 x i32]** [[TMP10]], align 8 -// CHECK21-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK21-NEXT: 
store i64 [[TMP6]], i64* [[TMP11]], align 8 -// CHECK21-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK21-NEXT: store i8* null, i8** [[TMP12]], align 8 -// CHECK21-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK21-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to %struct.SS** -// CHECK21-NEXT: store %struct.SS* [[THIS1]], %struct.SS** [[TMP14]], align 8 -// CHECK21-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK21-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to float** -// CHECK21-NEXT: store float* [[B]], float** [[TMP16]], align 8 -// CHECK21-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK21-NEXT: store i64 4, i64* [[TMP17]], align 8 -// CHECK21-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK21-NEXT: store i8* null, i8** [[TMP18]], align 8 -// CHECK21-NEXT: [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 -// CHECK21-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to %struct.SS** -// CHECK21-NEXT: store %struct.SS* [[THIS1]], %struct.SS** [[TMP20]], align 8 -// CHECK21-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK21-NEXT: [[TMP22:%.*]] = bitcast i8** [[TMP21]] to [123 x i32]** -// CHECK21-NEXT: store [123 x i32]* [[A]], [123 x i32]** [[TMP22]], align 8 -// CHECK21-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK21-NEXT: store i64 492, i64* [[TMP23]], align 8 -// CHECK21-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK21-NEXT: store i8* null, i8** [[TMP24]], align 8 -// CHECK21-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK21-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK21-NEXT: [[TMP27:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 +// CHECK21-NEXT: store i64 [[TMP6]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), align 8 +// CHECK21-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK21-NEXT: store i8* null, i8** [[TMP11]], align 8 +// CHECK21-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK21-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to %struct.SS** +// CHECK21-NEXT: store %struct.SS* [[THIS1]], %struct.SS** [[TMP13]], align 8 +// CHECK21-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK21-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to float** +// CHECK21-NEXT: store float* [[B]], float** [[TMP15]], align 8 +// CHECK21-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK21-NEXT: store i8* null, i8** [[TMP16]], align 8 +// CHECK21-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK21-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to %struct.SS** +// CHECK21-NEXT: store %struct.SS* [[THIS1]], %struct.SS** [[TMP18]], 
align 8 +// CHECK21-NEXT: [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 +// CHECK21-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to [123 x i32]** +// CHECK21-NEXT: store [123 x i32]* [[A]], [123 x i32]** [[TMP20]], align 8 +// CHECK21-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK21-NEXT: store i8* null, i8** [[TMP21]], align 8 +// CHECK21-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK21-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK21-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 123) -// CHECK21-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l123.region_id, i32 3, i8** [[TMP25]], i8** [[TMP26]], i64* [[TMP27]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) -// CHECK21-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0 -// CHECK21-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK21-NEXT: [[TMP24:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l123.region_id, i32 3, i8** [[TMP22]], i8** [[TMP23]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK21-NEXT: [[TMP25:%.*]] = icmp ne i32 [[TMP24]], 0 +// CHECK21-NEXT: br i1 [[TMP25]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK21: omp_offload.failed: // CHECK21-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l123(%struct.SS* [[THIS1]]) #[[ATTR2:[0-9]+]] // CHECK21-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK21: omp_offload.cont: // CHECK21-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0 // CHECK21-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A2]], i64 0, i64 0 -// CHECK21-NEXT: [[TMP30:%.*]] = load i32, i32* [[ARRAYIDX]], align 4 -// CHECK21-NEXT: ret i32 [[TMP30]] +// CHECK21-NEXT: [[TMP26:%.*]] = load i32, i32* [[ARRAYIDX]], align 4 +// CHECK21-NEXT: ret i32 [[TMP26]] // // // CHECK21-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l123 @@ -4428,7 +4365,6 @@ // CHECK22-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK22-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK22-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8 -// CHECK22-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 8 // CHECK22-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK22-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 8 // CHECK22-NEXT: [[THIS1:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 8 @@ -4447,45 +4383,39 @@ // CHECK22-NEXT: [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK22-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to [123 x i32]** // CHECK22-NEXT: store [123 x i32]* [[A]], [123 x i32]** [[TMP10]], align 8 -// CHECK22-NEXT: 
[[TMP11:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK22-NEXT: store i64 [[TMP6]], i64* [[TMP11]], align 8 -// CHECK22-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK22-NEXT: store i8* null, i8** [[TMP12]], align 8 -// CHECK22-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK22-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to %struct.SS** -// CHECK22-NEXT: store %struct.SS* [[THIS1]], %struct.SS** [[TMP14]], align 8 -// CHECK22-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK22-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to float** -// CHECK22-NEXT: store float* [[B]], float** [[TMP16]], align 8 -// CHECK22-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK22-NEXT: store i64 4, i64* [[TMP17]], align 8 -// CHECK22-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK22-NEXT: store i8* null, i8** [[TMP18]], align 8 -// CHECK22-NEXT: [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 -// CHECK22-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to %struct.SS** -// CHECK22-NEXT: store %struct.SS* [[THIS1]], %struct.SS** [[TMP20]], align 8 -// CHECK22-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK22-NEXT: [[TMP22:%.*]] = bitcast i8** [[TMP21]] to [123 x i32]** -// CHECK22-NEXT: store [123 x i32]* [[A]], [123 x i32]** [[TMP22]], align 8 -// CHECK22-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK22-NEXT: store i64 492, i64* [[TMP23]], align 8 -// CHECK22-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK22-NEXT: store i8* null, i8** [[TMP24]], align 8 -// CHECK22-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK22-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK22-NEXT: [[TMP27:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 +// CHECK22-NEXT: store i64 [[TMP6]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), align 8 +// CHECK22-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK22-NEXT: store i8* null, i8** [[TMP11]], align 8 +// CHECK22-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK22-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to %struct.SS** +// CHECK22-NEXT: store %struct.SS* [[THIS1]], %struct.SS** [[TMP13]], align 8 +// CHECK22-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK22-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to float** +// CHECK22-NEXT: store float* [[B]], float** [[TMP15]], align 8 +// CHECK22-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK22-NEXT: store i8* null, i8** [[TMP16]], align 8 +// CHECK22-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK22-NEXT: [[TMP18:%.*]] = 
bitcast i8** [[TMP17]] to %struct.SS** +// CHECK22-NEXT: store %struct.SS* [[THIS1]], %struct.SS** [[TMP18]], align 8 +// CHECK22-NEXT: [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 +// CHECK22-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to [123 x i32]** +// CHECK22-NEXT: store [123 x i32]* [[A]], [123 x i32]** [[TMP20]], align 8 +// CHECK22-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK22-NEXT: store i8* null, i8** [[TMP21]], align 8 +// CHECK22-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK22-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK22-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 123) -// CHECK22-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l123.region_id, i32 3, i8** [[TMP25]], i8** [[TMP26]], i64* [[TMP27]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) -// CHECK22-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0 -// CHECK22-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK22-NEXT: [[TMP24:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l123.region_id, i32 3, i8** [[TMP22]], i8** [[TMP23]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK22-NEXT: [[TMP25:%.*]] = icmp ne i32 [[TMP24]], 0 +// CHECK22-NEXT: br i1 [[TMP25]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK22: omp_offload.failed: // CHECK22-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l123(%struct.SS* [[THIS1]]) #[[ATTR2:[0-9]+]] // CHECK22-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK22: omp_offload.cont: // CHECK22-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0 // CHECK22-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A2]], i64 0, i64 0 -// CHECK22-NEXT: [[TMP30:%.*]] = load i32, i32* [[ARRAYIDX]], align 4 -// CHECK22-NEXT: ret i32 [[TMP30]] +// CHECK22-NEXT: [[TMP26:%.*]] = load i32, i32* [[ARRAYIDX]], align 4 +// CHECK22-NEXT: ret i32 [[TMP26]] // // // CHECK22-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l123 @@ -4650,7 +4580,6 @@ // CHECK23-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK23-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK23-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4 -// CHECK23-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 4 // CHECK23-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK23-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4 // CHECK23-NEXT: [[THIS1:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4 @@ -4669,45 +4598,39 @@ // CHECK23-NEXT: [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK23-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to [123 x 
i32]** // CHECK23-NEXT: store [123 x i32]* [[A]], [123 x i32]** [[TMP10]], align 4 -// CHECK23-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK23-NEXT: store i64 [[TMP6]], i64* [[TMP11]], align 4 -// CHECK23-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK23-NEXT: store i8* null, i8** [[TMP12]], align 4 -// CHECK23-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK23-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to %struct.SS** -// CHECK23-NEXT: store %struct.SS* [[THIS1]], %struct.SS** [[TMP14]], align 4 -// CHECK23-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK23-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to float** -// CHECK23-NEXT: store float* [[B]], float** [[TMP16]], align 4 -// CHECK23-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK23-NEXT: store i64 4, i64* [[TMP17]], align 4 -// CHECK23-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK23-NEXT: store i8* null, i8** [[TMP18]], align 4 -// CHECK23-NEXT: [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 -// CHECK23-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to %struct.SS** -// CHECK23-NEXT: store %struct.SS* [[THIS1]], %struct.SS** [[TMP20]], align 4 -// CHECK23-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK23-NEXT: [[TMP22:%.*]] = bitcast i8** [[TMP21]] to [123 x i32]** -// CHECK23-NEXT: store [123 x i32]* [[A]], [123 x i32]** [[TMP22]], align 4 -// CHECK23-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK23-NEXT: store i64 492, i64* [[TMP23]], align 4 -// CHECK23-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK23-NEXT: store i8* null, i8** [[TMP24]], align 4 -// CHECK23-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK23-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK23-NEXT: [[TMP27:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 +// CHECK23-NEXT: store i64 [[TMP6]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), align 4 +// CHECK23-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK23-NEXT: store i8* null, i8** [[TMP11]], align 4 +// CHECK23-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK23-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to %struct.SS** +// CHECK23-NEXT: store %struct.SS* [[THIS1]], %struct.SS** [[TMP13]], align 4 +// CHECK23-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK23-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to float** +// CHECK23-NEXT: store float* [[B]], float** [[TMP15]], align 4 +// CHECK23-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK23-NEXT: store i8* null, i8** [[TMP16]], align 4 +// CHECK23-NEXT: [[TMP17:%.*]] = getelementptr 
inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK23-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to %struct.SS** +// CHECK23-NEXT: store %struct.SS* [[THIS1]], %struct.SS** [[TMP18]], align 4 +// CHECK23-NEXT: [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 +// CHECK23-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to [123 x i32]** +// CHECK23-NEXT: store [123 x i32]* [[A]], [123 x i32]** [[TMP20]], align 4 +// CHECK23-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK23-NEXT: store i8* null, i8** [[TMP21]], align 4 +// CHECK23-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK23-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK23-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 123) -// CHECK23-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l123.region_id, i32 3, i8** [[TMP25]], i8** [[TMP26]], i64* [[TMP27]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) -// CHECK23-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0 -// CHECK23-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK23-NEXT: [[TMP24:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l123.region_id, i32 3, i8** [[TMP22]], i8** [[TMP23]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK23-NEXT: [[TMP25:%.*]] = icmp ne i32 [[TMP24]], 0 +// CHECK23-NEXT: br i1 [[TMP25]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK23: omp_offload.failed: // CHECK23-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l123(%struct.SS* [[THIS1]]) #[[ATTR2:[0-9]+]] // CHECK23-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK23: omp_offload.cont: // CHECK23-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0 // CHECK23-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A2]], i32 0, i32 0 -// CHECK23-NEXT: [[TMP30:%.*]] = load i32, i32* [[ARRAYIDX]], align 4 -// CHECK23-NEXT: ret i32 [[TMP30]] +// CHECK23-NEXT: [[TMP26:%.*]] = load i32, i32* [[ARRAYIDX]], align 4 +// CHECK23-NEXT: ret i32 [[TMP26]] // // // CHECK23-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l123 @@ -4870,7 +4793,6 @@ // CHECK24-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK24-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK24-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4 -// CHECK24-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 4 // CHECK24-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK24-NEXT: store %struct.SS* [[THIS]], %struct.SS** [[THIS_ADDR]], align 4 // CHECK24-NEXT: [[THIS1:%.*]] = load %struct.SS*, %struct.SS** [[THIS_ADDR]], align 4 @@ -4889,45 +4811,39 @@ // CHECK24-NEXT: [[TMP9:%.*]] = getelementptr inbounds [3 x i8*], [3 x 
i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK24-NEXT: [[TMP10:%.*]] = bitcast i8** [[TMP9]] to [123 x i32]** // CHECK24-NEXT: store [123 x i32]* [[A]], [123 x i32]** [[TMP10]], align 4 -// CHECK24-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK24-NEXT: store i64 [[TMP6]], i64* [[TMP11]], align 4 -// CHECK24-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK24-NEXT: store i8* null, i8** [[TMP12]], align 4 -// CHECK24-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK24-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to %struct.SS** -// CHECK24-NEXT: store %struct.SS* [[THIS1]], %struct.SS** [[TMP14]], align 4 -// CHECK24-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK24-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to float** -// CHECK24-NEXT: store float* [[B]], float** [[TMP16]], align 4 -// CHECK24-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK24-NEXT: store i64 4, i64* [[TMP17]], align 4 -// CHECK24-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK24-NEXT: store i8* null, i8** [[TMP18]], align 4 -// CHECK24-NEXT: [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 -// CHECK24-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to %struct.SS** -// CHECK24-NEXT: store %struct.SS* [[THIS1]], %struct.SS** [[TMP20]], align 4 -// CHECK24-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK24-NEXT: [[TMP22:%.*]] = bitcast i8** [[TMP21]] to [123 x i32]** -// CHECK24-NEXT: store [123 x i32]* [[A]], [123 x i32]** [[TMP22]], align 4 -// CHECK24-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK24-NEXT: store i64 492, i64* [[TMP23]], align 4 -// CHECK24-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK24-NEXT: store i8* null, i8** [[TMP24]], align 4 -// CHECK24-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK24-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK24-NEXT: [[TMP27:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 +// CHECK24-NEXT: store i64 [[TMP6]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), align 4 +// CHECK24-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK24-NEXT: store i8* null, i8** [[TMP11]], align 4 +// CHECK24-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK24-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to %struct.SS** +// CHECK24-NEXT: store %struct.SS* [[THIS1]], %struct.SS** [[TMP13]], align 4 +// CHECK24-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK24-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to float** +// CHECK24-NEXT: store float* [[B]], float** [[TMP15]], align 4 +// CHECK24-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 
+// CHECK24-NEXT: store i8* null, i8** [[TMP16]], align 4 +// CHECK24-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK24-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to %struct.SS** +// CHECK24-NEXT: store %struct.SS* [[THIS1]], %struct.SS** [[TMP18]], align 4 +// CHECK24-NEXT: [[TMP19:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 +// CHECK24-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to [123 x i32]** +// CHECK24-NEXT: store [123 x i32]* [[A]], [123 x i32]** [[TMP20]], align 4 +// CHECK24-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK24-NEXT: store i8* null, i8** [[TMP21]], align 4 +// CHECK24-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK24-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK24-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 123) -// CHECK24-NEXT: [[TMP28:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l123.region_id, i32 3, i8** [[TMP25]], i8** [[TMP26]], i64* [[TMP27]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) -// CHECK24-NEXT: [[TMP29:%.*]] = icmp ne i32 [[TMP28]], 0 -// CHECK24-NEXT: br i1 [[TMP29]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK24-NEXT: [[TMP24:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l123.region_id, i32 3, i8** [[TMP22]], i8** [[TMP23]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK24-NEXT: [[TMP25:%.*]] = icmp ne i32 [[TMP24]], 0 +// CHECK24-NEXT: br i1 [[TMP25]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK24: omp_offload.failed: // CHECK24-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l123(%struct.SS* [[THIS1]]) #[[ATTR2:[0-9]+]] // CHECK24-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK24: omp_offload.cont: // CHECK24-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_SS]], %struct.SS* [[THIS1]], i32 0, i32 0 // CHECK24-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [123 x i32], [123 x i32]* [[A2]], i32 0, i32 0 -// CHECK24-NEXT: [[TMP30:%.*]] = load i32, i32* [[ARRAYIDX]], align 4 -// CHECK24-NEXT: ret i32 [[TMP30]] +// CHECK24-NEXT: [[TMP26:%.*]] = load i32, i32* [[ARRAYIDX]], align 4 +// CHECK24-NEXT: ret i32 [[TMP26]] // // // CHECK24-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2SSIiLi123ELx456EE3fooEv_l123 @@ -5714,7 +5630,6 @@ // CHECK33-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK33-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK33-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8 -// CHECK33-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 8 // CHECK33-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK33-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK33-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -5739,47 +5654,41 @@ // 
CHECK33-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK33-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i64* // CHECK33-NEXT: store i64 [[TMP4]], i64* [[TMP9]], align 8 -// CHECK33-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK33-NEXT: store i64 4, i64* [[TMP10]], align 8 -// CHECK33-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK33-NEXT: store i8* null, i8** [[TMP11]], align 8 -// CHECK33-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK33-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64* -// CHECK33-NEXT: store i64 [[TMP1]], i64* [[TMP13]], align 8 -// CHECK33-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK33-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64* -// CHECK33-NEXT: store i64 [[TMP1]], i64* [[TMP15]], align 8 -// CHECK33-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK33-NEXT: store i64 8, i64* [[TMP16]], align 8 -// CHECK33-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK33-NEXT: store i8* null, i8** [[TMP17]], align 8 -// CHECK33-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK33-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK33-NEXT: store i8* null, i8** [[TMP10]], align 8 +// CHECK33-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK33-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i64* +// CHECK33-NEXT: store i64 [[TMP1]], i64* [[TMP12]], align 8 +// CHECK33-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK33-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i64* +// CHECK33-NEXT: store i64 [[TMP1]], i64* [[TMP14]], align 8 +// CHECK33-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK33-NEXT: store i8* null, i8** [[TMP15]], align 8 +// CHECK33-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK33-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** +// CHECK33-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 8 +// CHECK33-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK33-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** // CHECK33-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 8 -// CHECK33-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK33-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** -// CHECK33-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 8 -// CHECK33-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK33-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 8 -// CHECK33-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK33-NEXT: store i8* null, i8** [[TMP23]], align 8 -// CHECK33-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// 
CHECK33-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK33-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK33-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4 -// CHECK33-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK33-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK33-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 +// CHECK33-NEXT: store i64 [[TMP5]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 2), align 8 +// CHECK33-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK33-NEXT: store i8* null, i8** [[TMP20]], align 8 +// CHECK33-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK33-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK33-NEXT: [[TMP23:%.*]] = load i32, i32* [[N]], align 4 +// CHECK33-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK33-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK33-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK33-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK33-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK33-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK33-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK33-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1 -// CHECK33-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 -// CHECK33-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[TMP30]]) -// CHECK33-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l192.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) -// CHECK33-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 -// CHECK33-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK33-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK33-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], 1 +// CHECK33-NEXT: [[TMP26:%.*]] = zext i32 [[ADD]] to i64 +// CHECK33-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[TMP26]]) +// CHECK33-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l192.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK33-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK33-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK33: omp_offload.failed: // CHECK33-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l192(i64 [[TMP4]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK33-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -5787,10 +5696,10 @@ // CHECK33-NEXT: [[TMP33:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 // CHECK33-NEXT: [[CALL:%.*]] = 
call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP33]]) // CHECK33-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK33-NEXT: [[TMP34:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK33-NEXT: call void @llvm.stackrestore(i8* [[TMP34]]) -// CHECK33-NEXT: [[TMP35:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK33-NEXT: ret i32 [[TMP35]] +// CHECK33-NEXT: [[TMP30:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK33-NEXT: call void @llvm.stackrestore(i8* [[TMP30]]) +// CHECK33-NEXT: [[TMP31:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK33-NEXT: ret i32 [[TMP31]] // // // CHECK33-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l192 @@ -5968,7 +5877,7 @@ // CHECK33-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK33-NEXT: [[TMP21:%.*]] = load i32, i32* [[TE]], align 4 // CHECK33-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK33-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l181.region_id, i32 3, i8** [[TMP19]], i8** [[TMP20]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.2, i32 0, i32 0), i8** null, i8** null, i32 [[TMP21]], i32 1) +// CHECK33-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l181.region_id, i32 3, i8** [[TMP19]], i8** [[TMP20]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.2, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.3, i32 0, i32 0), i8** null, i8** null, i32 [[TMP21]], i32 1) // CHECK33-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0 // CHECK33-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK33: omp_offload.failed: @@ -6092,7 +6001,6 @@ // CHECK34-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK34-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK34-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8 -// CHECK34-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 8 // CHECK34-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK34-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK34-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -6117,47 +6025,41 @@ // CHECK34-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK34-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i64* // CHECK34-NEXT: store i64 [[TMP4]], i64* [[TMP9]], align 8 -// CHECK34-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK34-NEXT: store i64 4, i64* [[TMP10]], align 8 -// CHECK34-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK34-NEXT: store i8* null, i8** [[TMP11]], align 8 -// CHECK34-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK34-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64* -// CHECK34-NEXT: store i64 [[TMP1]], i64* [[TMP13]], align 8 -// CHECK34-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// 
CHECK34-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64* -// CHECK34-NEXT: store i64 [[TMP1]], i64* [[TMP15]], align 8 -// CHECK34-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK34-NEXT: store i64 8, i64* [[TMP16]], align 8 -// CHECK34-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK34-NEXT: store i8* null, i8** [[TMP17]], align 8 -// CHECK34-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK34-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK34-NEXT: store i8* null, i8** [[TMP10]], align 8 +// CHECK34-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK34-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i64* +// CHECK34-NEXT: store i64 [[TMP1]], i64* [[TMP12]], align 8 +// CHECK34-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK34-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i64* +// CHECK34-NEXT: store i64 [[TMP1]], i64* [[TMP14]], align 8 +// CHECK34-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK34-NEXT: store i8* null, i8** [[TMP15]], align 8 +// CHECK34-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK34-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** +// CHECK34-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 8 +// CHECK34-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK34-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** // CHECK34-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 8 -// CHECK34-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK34-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** -// CHECK34-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 8 -// CHECK34-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK34-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 8 -// CHECK34-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK34-NEXT: store i8* null, i8** [[TMP23]], align 8 -// CHECK34-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK34-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK34-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK34-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4 -// CHECK34-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK34-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK34-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 +// CHECK34-NEXT: store i64 [[TMP5]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 2), align 8 +// CHECK34-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK34-NEXT: store i8* null, i8** [[TMP20]], align 8 +// CHECK34-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK34-NEXT: 
[[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK34-NEXT: [[TMP23:%.*]] = load i32, i32* [[N]], align 4 +// CHECK34-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK34-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK34-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK34-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK34-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK34-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK34-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK34-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1 -// CHECK34-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 -// CHECK34-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[TMP30]]) -// CHECK34-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l192.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) -// CHECK34-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 -// CHECK34-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK34-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK34-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], 1 +// CHECK34-NEXT: [[TMP26:%.*]] = zext i32 [[ADD]] to i64 +// CHECK34-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[TMP26]]) +// CHECK34-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l192.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK34-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK34-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK34: omp_offload.failed: // CHECK34-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l192(i64 [[TMP4]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK34-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -6165,10 +6067,10 @@ // CHECK34-NEXT: [[TMP33:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 // CHECK34-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP33]]) // CHECK34-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK34-NEXT: [[TMP34:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK34-NEXT: call void @llvm.stackrestore(i8* [[TMP34]]) -// CHECK34-NEXT: [[TMP35:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK34-NEXT: ret i32 [[TMP35]] +// CHECK34-NEXT: [[TMP30:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK34-NEXT: call void @llvm.stackrestore(i8* [[TMP30]]) +// CHECK34-NEXT: [[TMP31:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK34-NEXT: ret i32 [[TMP31]] // // // CHECK34-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l192 @@ -6346,7 +6248,7 @@ // CHECK34-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK34-NEXT: [[TMP21:%.*]] = load i32, i32* [[TE]], align 4 // CHECK34-NEXT: call 
void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK34-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l181.region_id, i32 3, i8** [[TMP19]], i8** [[TMP20]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.2, i32 0, i32 0), i8** null, i8** null, i32 [[TMP21]], i32 1) +// CHECK34-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l181.region_id, i32 3, i8** [[TMP19]], i8** [[TMP20]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.2, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.3, i32 0, i32 0), i8** null, i8** null, i32 [[TMP21]], i32 1) // CHECK34-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0 // CHECK34-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK34: omp_offload.failed: @@ -6470,7 +6372,6 @@ // CHECK35-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK35-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK35-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4 -// CHECK35-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 4 // CHECK35-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK35-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK35-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -6494,47 +6395,41 @@ // CHECK35-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK35-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i32* // CHECK35-NEXT: store i32 [[TMP3]], i32* [[TMP9]], align 4 -// CHECK35-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK35-NEXT: store i64 4, i64* [[TMP10]], align 4 -// CHECK35-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK35-NEXT: store i8* null, i8** [[TMP11]], align 4 -// CHECK35-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK35-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32* -// CHECK35-NEXT: store i32 [[TMP0]], i32* [[TMP13]], align 4 -// CHECK35-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK35-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32* -// CHECK35-NEXT: store i32 [[TMP0]], i32* [[TMP15]], align 4 -// CHECK35-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK35-NEXT: store i64 4, i64* [[TMP16]], align 4 -// CHECK35-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK35-NEXT: store i8* null, i8** [[TMP17]], align 4 -// CHECK35-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK35-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK35-NEXT: store i8* null, i8** [[TMP10]], align 4 +// CHECK35-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK35-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i32* 
+// CHECK35-NEXT: store i32 [[TMP0]], i32* [[TMP12]], align 4 +// CHECK35-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK35-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i32* +// CHECK35-NEXT: store i32 [[TMP0]], i32* [[TMP14]], align 4 +// CHECK35-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK35-NEXT: store i8* null, i8** [[TMP15]], align 4 +// CHECK35-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK35-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** +// CHECK35-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 4 +// CHECK35-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK35-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** // CHECK35-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 4 -// CHECK35-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK35-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** -// CHECK35-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 4 -// CHECK35-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK35-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 4 -// CHECK35-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK35-NEXT: store i8* null, i8** [[TMP23]], align 4 -// CHECK35-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK35-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK35-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK35-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4 -// CHECK35-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK35-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK35-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 +// CHECK35-NEXT: store i64 [[TMP5]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 2), align 4 +// CHECK35-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK35-NEXT: store i8* null, i8** [[TMP20]], align 4 +// CHECK35-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK35-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK35-NEXT: [[TMP23:%.*]] = load i32, i32* [[N]], align 4 +// CHECK35-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK35-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK35-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK35-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK35-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK35-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK35-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK35-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1 -// CHECK35-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 -// CHECK35-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[TMP30]]) -// CHECK35-NEXT: [[TMP31:%.*]] = call i32 
@__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l192.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) -// CHECK35-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 -// CHECK35-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK35-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK35-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], 1 +// CHECK35-NEXT: [[TMP26:%.*]] = zext i32 [[ADD]] to i64 +// CHECK35-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[TMP26]]) +// CHECK35-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l192.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK35-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK35-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK35: omp_offload.failed: // CHECK35-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l192(i32 [[TMP3]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK35-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -6542,10 +6437,10 @@ // CHECK35-NEXT: [[TMP33:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 // CHECK35-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP33]]) // CHECK35-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK35-NEXT: [[TMP34:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK35-NEXT: call void @llvm.stackrestore(i8* [[TMP34]]) -// CHECK35-NEXT: [[TMP35:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK35-NEXT: ret i32 [[TMP35]] +// CHECK35-NEXT: [[TMP30:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK35-NEXT: call void @llvm.stackrestore(i8* [[TMP30]]) +// CHECK35-NEXT: [[TMP31:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK35-NEXT: ret i32 [[TMP31]] // // // CHECK35-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l192 @@ -6719,7 +6614,7 @@ // CHECK35-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK35-NEXT: [[TMP21:%.*]] = load i32, i32* [[TE]], align 4 // CHECK35-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK35-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l181.region_id, i32 3, i8** [[TMP19]], i8** [[TMP20]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.2, i32 0, i32 0), i8** null, i8** null, i32 [[TMP21]], i32 1) +// CHECK35-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l181.region_id, i32 3, i8** [[TMP19]], i8** [[TMP20]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.2, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.3, i32 0, 
i32 0), i8** null, i8** null, i32 [[TMP21]], i32 1) // CHECK35-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0 // CHECK35-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK35: omp_offload.failed: @@ -6840,7 +6735,6 @@ // CHECK36-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK36-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK36-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4 -// CHECK36-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 4 // CHECK36-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK36-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK36-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -6864,47 +6758,41 @@ // CHECK36-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK36-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i32* // CHECK36-NEXT: store i32 [[TMP3]], i32* [[TMP9]], align 4 -// CHECK36-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK36-NEXT: store i64 4, i64* [[TMP10]], align 4 -// CHECK36-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK36-NEXT: store i8* null, i8** [[TMP11]], align 4 -// CHECK36-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK36-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32* -// CHECK36-NEXT: store i32 [[TMP0]], i32* [[TMP13]], align 4 -// CHECK36-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK36-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32* -// CHECK36-NEXT: store i32 [[TMP0]], i32* [[TMP15]], align 4 -// CHECK36-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK36-NEXT: store i64 4, i64* [[TMP16]], align 4 -// CHECK36-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK36-NEXT: store i8* null, i8** [[TMP17]], align 4 -// CHECK36-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK36-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK36-NEXT: store i8* null, i8** [[TMP10]], align 4 +// CHECK36-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK36-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i32* +// CHECK36-NEXT: store i32 [[TMP0]], i32* [[TMP12]], align 4 +// CHECK36-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK36-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i32* +// CHECK36-NEXT: store i32 [[TMP0]], i32* [[TMP14]], align 4 +// CHECK36-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK36-NEXT: store i8* null, i8** [[TMP15]], align 4 +// CHECK36-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK36-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** +// CHECK36-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 4 +// CHECK36-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK36-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** // CHECK36-NEXT: store 
i32* [[VLA]], i32** [[TMP19]], align 4 -// CHECK36-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK36-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** -// CHECK36-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 4 -// CHECK36-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK36-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 4 -// CHECK36-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK36-NEXT: store i8* null, i8** [[TMP23]], align 4 -// CHECK36-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK36-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK36-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK36-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4 -// CHECK36-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK36-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK36-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 +// CHECK36-NEXT: store i64 [[TMP5]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 2), align 4 +// CHECK36-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK36-NEXT: store i8* null, i8** [[TMP20]], align 4 +// CHECK36-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK36-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK36-NEXT: [[TMP23:%.*]] = load i32, i32* [[N]], align 4 +// CHECK36-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK36-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK36-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK36-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK36-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK36-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK36-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK36-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1 -// CHECK36-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 -// CHECK36-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[TMP30]]) -// CHECK36-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l192.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) -// CHECK36-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 -// CHECK36-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK36-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK36-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], 1 +// CHECK36-NEXT: [[TMP26:%.*]] = zext i32 [[ADD]] to i64 +// CHECK36-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[TMP26]]) +// CHECK36-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* 
@.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l192.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK36-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK36-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK36: omp_offload.failed: // CHECK36-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l192(i32 [[TMP3]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK36-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -6912,10 +6800,10 @@ // CHECK36-NEXT: [[TMP33:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 // CHECK36-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP33]]) // CHECK36-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK36-NEXT: [[TMP34:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK36-NEXT: call void @llvm.stackrestore(i8* [[TMP34]]) -// CHECK36-NEXT: [[TMP35:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK36-NEXT: ret i32 [[TMP35]] +// CHECK36-NEXT: [[TMP30:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK36-NEXT: call void @llvm.stackrestore(i8* [[TMP30]]) +// CHECK36-NEXT: [[TMP31:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK36-NEXT: ret i32 [[TMP31]] // // // CHECK36-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l192 @@ -7089,7 +6977,7 @@ // CHECK36-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK36-NEXT: [[TMP21:%.*]] = load i32, i32* [[TE]], align 4 // CHECK36-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK36-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l181.region_id, i32 3, i8** [[TMP19]], i8** [[TMP20]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.2, i32 0, i32 0), i8** null, i8** null, i32 [[TMP21]], i32 1) +// CHECK36-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l181.region_id, i32 3, i8** [[TMP19]], i8** [[TMP20]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.2, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.3, i32 0, i32 0), i8** null, i8** null, i32 [[TMP21]], i32 1) // CHECK36-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0 // CHECK36-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK36: omp_offload.failed: @@ -7211,7 +7099,6 @@ // CHECK37-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x i8*], align 8 // CHECK37-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x i8*], align 8 // CHECK37-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x i8*], align 8 -// CHECK37-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [4 x i64], align 8 // CHECK37-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK37-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK37-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4 @@ -7240,57 +7127,49 @@ // CHECK37-NEXT: [[TMP10:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK37-NEXT: [[TMP11:%.*]] = 
bitcast i8** [[TMP10]] to i64* // CHECK37-NEXT: store i64 [[TMP4]], i64* [[TMP11]], align 8 -// CHECK37-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK37-NEXT: store i64 4, i64* [[TMP12]], align 8 -// CHECK37-NEXT: [[TMP13:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK37-NEXT: store i8* null, i8** [[TMP13]], align 8 -// CHECK37-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK37-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64* -// CHECK37-NEXT: store i64 [[TMP6]], i64* [[TMP15]], align 8 -// CHECK37-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK37-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i64* -// CHECK37-NEXT: store i64 [[TMP6]], i64* [[TMP17]], align 8 -// CHECK37-NEXT: [[TMP18:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK37-NEXT: store i64 4, i64* [[TMP18]], align 8 -// CHECK37-NEXT: [[TMP19:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK37-NEXT: store i8* null, i8** [[TMP19]], align 8 -// CHECK37-NEXT: [[TMP20:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK37-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK37-NEXT: store i8* null, i8** [[TMP12]], align 8 +// CHECK37-NEXT: [[TMP13:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK37-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i64* +// CHECK37-NEXT: store i64 [[TMP6]], i64* [[TMP14]], align 8 +// CHECK37-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK37-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i64* +// CHECK37-NEXT: store i64 [[TMP6]], i64* [[TMP16]], align 8 +// CHECK37-NEXT: [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK37-NEXT: store i8* null, i8** [[TMP17]], align 8 +// CHECK37-NEXT: [[TMP18:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK37-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i64* +// CHECK37-NEXT: store i64 [[TMP1]], i64* [[TMP19]], align 8 +// CHECK37-NEXT: [[TMP20:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK37-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i64* // CHECK37-NEXT: store i64 [[TMP1]], i64* [[TMP21]], align 8 -// CHECK37-NEXT: [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK37-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i64* -// CHECK37-NEXT: store i64 [[TMP1]], i64* [[TMP23]], align 8 -// CHECK37-NEXT: [[TMP24:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK37-NEXT: store i64 8, i64* [[TMP24]], align 8 -// CHECK37-NEXT: [[TMP25:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK37-NEXT: store i8* null, i8** [[TMP25]], align 8 -// CHECK37-NEXT: [[TMP26:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK37-NEXT: [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i32** -// CHECK37-NEXT: store i32* [[VLA]], i32** [[TMP27]], align 8 -// CHECK37-NEXT: [[TMP28:%.*]] 
= getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK37-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i32** -// CHECK37-NEXT: store i32* [[VLA]], i32** [[TMP29]], align 8 -// CHECK37-NEXT: [[TMP30:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK37-NEXT: store i64 [[TMP7]], i64* [[TMP30]], align 8 -// CHECK37-NEXT: [[TMP31:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 -// CHECK37-NEXT: store i8* null, i8** [[TMP31]], align 8 -// CHECK37-NEXT: [[TMP32:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK37-NEXT: [[TMP33:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK37-NEXT: [[TMP34:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK37-NEXT: [[TMP35:%.*]] = load i32, i32* [[N]], align 4 -// CHECK37-NEXT: store i32 [[TMP35]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK37-NEXT: [[TMP36:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK37-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP36]], 0 +// CHECK37-NEXT: [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK37-NEXT: store i8* null, i8** [[TMP22]], align 8 +// CHECK37-NEXT: [[TMP23:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK37-NEXT: [[TMP24:%.*]] = bitcast i8** [[TMP23]] to i32** +// CHECK37-NEXT: store i32* [[VLA]], i32** [[TMP24]], align 8 +// CHECK37-NEXT: [[TMP25:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK37-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i32** +// CHECK37-NEXT: store i32* [[VLA]], i32** [[TMP26]], align 8 +// CHECK37-NEXT: store i64 [[TMP7]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes, i32 0, i32 3), align 8 +// CHECK37-NEXT: [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 +// CHECK37-NEXT: store i8* null, i8** [[TMP27]], align 8 +// CHECK37-NEXT: [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK37-NEXT: [[TMP29:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK37-NEXT: [[TMP30:%.*]] = load i32, i32* [[N]], align 4 +// CHECK37-NEXT: store i32 [[TMP30]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK37-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK37-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP31]], 0 // CHECK37-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK37-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK37-NEXT: store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK37-NEXT: [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK37-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP37]], 1 -// CHECK37-NEXT: [[TMP38:%.*]] = zext i32 [[ADD]] to i64 -// CHECK37-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[TMP38]]) -// CHECK37-NEXT: [[TMP39:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l192.region_id, i32 4, i8** [[TMP32]], i8** [[TMP33]], i64* [[TMP34]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) -// CHECK37-NEXT: [[TMP40:%.*]] = icmp ne i32 [[TMP39]], 
0 -// CHECK37-NEXT: br i1 [[TMP40]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK37-NEXT: [[TMP32:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK37-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP32]], 1 +// CHECK37-NEXT: [[TMP33:%.*]] = zext i32 [[ADD]] to i64 +// CHECK37-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[TMP33]]) +// CHECK37-NEXT: [[TMP34:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l192.region_id, i32 4, i8** [[TMP28]], i8** [[TMP29]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK37-NEXT: [[TMP35:%.*]] = icmp ne i32 [[TMP34]], 0 +// CHECK37-NEXT: br i1 [[TMP35]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK37: omp_offload.failed: // CHECK37-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l192(i64 [[TMP4]], i64 [[TMP6]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK37-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -7298,10 +7177,10 @@ // CHECK37-NEXT: [[TMP41:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 // CHECK37-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP41]]) // CHECK37-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK37-NEXT: [[TMP42:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK37-NEXT: call void @llvm.stackrestore(i8* [[TMP42]]) -// CHECK37-NEXT: [[TMP43:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK37-NEXT: ret i32 [[TMP43]] +// CHECK37-NEXT: [[TMP37:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK37-NEXT: call void @llvm.stackrestore(i8* [[TMP37]]) +// CHECK37-NEXT: [[TMP38:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK37-NEXT: ret i32 [[TMP38]] // // // CHECK37-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l192 @@ -7529,7 +7408,7 @@ // CHECK37-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK37-NEXT: [[TMP21:%.*]] = load i32, i32* [[TE]], align 4 // CHECK37-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK37-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l181.region_id, i32 3, i8** [[TMP19]], i8** [[TMP20]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.2, i32 0, i32 0), i8** null, i8** null, i32 [[TMP21]], i32 1) +// CHECK37-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l181.region_id, i32 3, i8** [[TMP19]], i8** [[TMP20]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.2, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.3, i32 0, i32 0), i8** null, i8** null, i32 [[TMP21]], i32 1) // CHECK37-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0 // CHECK37-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK37: omp_offload.failed: @@ -7654,7 +7533,6 @@ // CHECK38-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x i8*], align 8 // 
CHECK38-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x i8*], align 8 // CHECK38-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x i8*], align 8 -// CHECK38-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [4 x i64], align 8 // CHECK38-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK38-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK38-NEXT: [[DOTCAPTURE_EXPR_2:%.*]] = alloca i32, align 4 @@ -7683,57 +7561,49 @@ // CHECK38-NEXT: [[TMP10:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK38-NEXT: [[TMP11:%.*]] = bitcast i8** [[TMP10]] to i64* // CHECK38-NEXT: store i64 [[TMP4]], i64* [[TMP11]], align 8 -// CHECK38-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK38-NEXT: store i64 4, i64* [[TMP12]], align 8 -// CHECK38-NEXT: [[TMP13:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK38-NEXT: store i8* null, i8** [[TMP13]], align 8 -// CHECK38-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK38-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64* -// CHECK38-NEXT: store i64 [[TMP6]], i64* [[TMP15]], align 8 -// CHECK38-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK38-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i64* -// CHECK38-NEXT: store i64 [[TMP6]], i64* [[TMP17]], align 8 -// CHECK38-NEXT: [[TMP18:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK38-NEXT: store i64 4, i64* [[TMP18]], align 8 -// CHECK38-NEXT: [[TMP19:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK38-NEXT: store i8* null, i8** [[TMP19]], align 8 -// CHECK38-NEXT: [[TMP20:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK38-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK38-NEXT: store i8* null, i8** [[TMP12]], align 8 +// CHECK38-NEXT: [[TMP13:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK38-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i64* +// CHECK38-NEXT: store i64 [[TMP6]], i64* [[TMP14]], align 8 +// CHECK38-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK38-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i64* +// CHECK38-NEXT: store i64 [[TMP6]], i64* [[TMP16]], align 8 +// CHECK38-NEXT: [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK38-NEXT: store i8* null, i8** [[TMP17]], align 8 +// CHECK38-NEXT: [[TMP18:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK38-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i64* +// CHECK38-NEXT: store i64 [[TMP1]], i64* [[TMP19]], align 8 +// CHECK38-NEXT: [[TMP20:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK38-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i64* // CHECK38-NEXT: store i64 [[TMP1]], i64* [[TMP21]], align 8 -// CHECK38-NEXT: [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK38-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i64* -// CHECK38-NEXT: store i64 [[TMP1]], i64* [[TMP23]], align 8 -// CHECK38-NEXT: [[TMP24:%.*]] = getelementptr inbounds [4 x 
i64], [4 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK38-NEXT: store i64 8, i64* [[TMP24]], align 8 -// CHECK38-NEXT: [[TMP25:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK38-NEXT: store i8* null, i8** [[TMP25]], align 8 -// CHECK38-NEXT: [[TMP26:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK38-NEXT: [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i32** -// CHECK38-NEXT: store i32* [[VLA]], i32** [[TMP27]], align 8 -// CHECK38-NEXT: [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK38-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i32** -// CHECK38-NEXT: store i32* [[VLA]], i32** [[TMP29]], align 8 -// CHECK38-NEXT: [[TMP30:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK38-NEXT: store i64 [[TMP7]], i64* [[TMP30]], align 8 -// CHECK38-NEXT: [[TMP31:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 -// CHECK38-NEXT: store i8* null, i8** [[TMP31]], align 8 -// CHECK38-NEXT: [[TMP32:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK38-NEXT: [[TMP33:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK38-NEXT: [[TMP34:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK38-NEXT: [[TMP35:%.*]] = load i32, i32* [[N]], align 4 -// CHECK38-NEXT: store i32 [[TMP35]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK38-NEXT: [[TMP36:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK38-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP36]], 0 +// CHECK38-NEXT: [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK38-NEXT: store i8* null, i8** [[TMP22]], align 8 +// CHECK38-NEXT: [[TMP23:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK38-NEXT: [[TMP24:%.*]] = bitcast i8** [[TMP23]] to i32** +// CHECK38-NEXT: store i32* [[VLA]], i32** [[TMP24]], align 8 +// CHECK38-NEXT: [[TMP25:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK38-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i32** +// CHECK38-NEXT: store i32* [[VLA]], i32** [[TMP26]], align 8 +// CHECK38-NEXT: store i64 [[TMP7]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes, i32 0, i32 3), align 8 +// CHECK38-NEXT: [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 +// CHECK38-NEXT: store i8* null, i8** [[TMP27]], align 8 +// CHECK38-NEXT: [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK38-NEXT: [[TMP29:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK38-NEXT: [[TMP30:%.*]] = load i32, i32* [[N]], align 4 +// CHECK38-NEXT: store i32 [[TMP30]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK38-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK38-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP31]], 0 // CHECK38-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK38-NEXT: [[SUB3:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK38-NEXT: store i32 [[SUB3]], i32* [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK38-NEXT: [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK38-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP37]], 1 -// CHECK38-NEXT: [[TMP38:%.*]] 
= zext i32 [[ADD]] to i64 -// CHECK38-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[TMP38]]) -// CHECK38-NEXT: [[TMP39:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l192.region_id, i32 4, i8** [[TMP32]], i8** [[TMP33]], i64* [[TMP34]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) -// CHECK38-NEXT: [[TMP40:%.*]] = icmp ne i32 [[TMP39]], 0 -// CHECK38-NEXT: br i1 [[TMP40]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK38-NEXT: [[TMP32:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK38-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP32]], 1 +// CHECK38-NEXT: [[TMP33:%.*]] = zext i32 [[ADD]] to i64 +// CHECK38-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[TMP33]]) +// CHECK38-NEXT: [[TMP34:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l192.region_id, i32 4, i8** [[TMP28]], i8** [[TMP29]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK38-NEXT: [[TMP35:%.*]] = icmp ne i32 [[TMP34]], 0 +// CHECK38-NEXT: br i1 [[TMP35]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK38: omp_offload.failed: // CHECK38-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l192(i64 [[TMP4]], i64 [[TMP6]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK38-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -7741,10 +7611,10 @@ // CHECK38-NEXT: [[TMP41:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 // CHECK38-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP41]]) // CHECK38-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK38-NEXT: [[TMP42:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK38-NEXT: call void @llvm.stackrestore(i8* [[TMP42]]) -// CHECK38-NEXT: [[TMP43:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK38-NEXT: ret i32 [[TMP43]] +// CHECK38-NEXT: [[TMP37:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK38-NEXT: call void @llvm.stackrestore(i8* [[TMP37]]) +// CHECK38-NEXT: [[TMP38:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK38-NEXT: ret i32 [[TMP38]] // // // CHECK38-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l192 @@ -7972,7 +7842,7 @@ // CHECK38-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK38-NEXT: [[TMP21:%.*]] = load i32, i32* [[TE]], align 4 // CHECK38-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK38-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l181.region_id, i32 3, i8** [[TMP19]], i8** [[TMP20]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.2, i32 0, i32 0), i8** null, i8** null, i32 [[TMP21]], i32 1) +// CHECK38-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* 
@.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l181.region_id, i32 3, i8** [[TMP19]], i8** [[TMP20]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.2, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.3, i32 0, i32 0), i8** null, i8** null, i32 [[TMP21]], i32 1) // CHECK38-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0 // CHECK38-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK38: omp_offload.failed: @@ -8097,7 +7967,6 @@ // CHECK39-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x i8*], align 4 // CHECK39-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x i8*], align 4 // CHECK39-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x i8*], align 4 -// CHECK39-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [4 x i64], align 4 // CHECK39-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK39-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK39-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -8124,57 +7993,49 @@ // CHECK39-NEXT: [[TMP10:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK39-NEXT: [[TMP11:%.*]] = bitcast i8** [[TMP10]] to i32* // CHECK39-NEXT: store i32 [[TMP3]], i32* [[TMP11]], align 4 -// CHECK39-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK39-NEXT: store i64 4, i64* [[TMP12]], align 4 -// CHECK39-NEXT: [[TMP13:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK39-NEXT: store i8* null, i8** [[TMP13]], align 4 -// CHECK39-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK39-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32* -// CHECK39-NEXT: store i32 [[TMP5]], i32* [[TMP15]], align 4 -// CHECK39-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK39-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32* -// CHECK39-NEXT: store i32 [[TMP5]], i32* [[TMP17]], align 4 -// CHECK39-NEXT: [[TMP18:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK39-NEXT: store i64 4, i64* [[TMP18]], align 4 -// CHECK39-NEXT: [[TMP19:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK39-NEXT: store i8* null, i8** [[TMP19]], align 4 -// CHECK39-NEXT: [[TMP20:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK39-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK39-NEXT: store i8* null, i8** [[TMP12]], align 4 +// CHECK39-NEXT: [[TMP13:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK39-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i32* +// CHECK39-NEXT: store i32 [[TMP5]], i32* [[TMP14]], align 4 +// CHECK39-NEXT: [[TMP15:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK39-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i32* +// CHECK39-NEXT: store i32 [[TMP5]], i32* [[TMP16]], align 4 +// CHECK39-NEXT: [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK39-NEXT: store i8* null, i8** [[TMP17]], align 4 +// CHECK39-NEXT: [[TMP18:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK39-NEXT: [[TMP19:%.*]] = bitcast i8** 
[[TMP18]] to i32* +// CHECK39-NEXT: store i32 [[TMP0]], i32* [[TMP19]], align 4 +// CHECK39-NEXT: [[TMP20:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK39-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32* // CHECK39-NEXT: store i32 [[TMP0]], i32* [[TMP21]], align 4 -// CHECK39-NEXT: [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK39-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i32* -// CHECK39-NEXT: store i32 [[TMP0]], i32* [[TMP23]], align 4 -// CHECK39-NEXT: [[TMP24:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK39-NEXT: store i64 4, i64* [[TMP24]], align 4 -// CHECK39-NEXT: [[TMP25:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK39-NEXT: store i8* null, i8** [[TMP25]], align 4 -// CHECK39-NEXT: [[TMP26:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK39-NEXT: [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i32** -// CHECK39-NEXT: store i32* [[VLA]], i32** [[TMP27]], align 4 -// CHECK39-NEXT: [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK39-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i32** -// CHECK39-NEXT: store i32* [[VLA]], i32** [[TMP29]], align 4 -// CHECK39-NEXT: [[TMP30:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK39-NEXT: store i64 [[TMP7]], i64* [[TMP30]], align 4 -// CHECK39-NEXT: [[TMP31:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 -// CHECK39-NEXT: store i8* null, i8** [[TMP31]], align 4 -// CHECK39-NEXT: [[TMP32:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK39-NEXT: [[TMP33:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK39-NEXT: [[TMP34:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK39-NEXT: [[TMP35:%.*]] = load i32, i32* [[N]], align 4 -// CHECK39-NEXT: store i32 [[TMP35]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK39-NEXT: [[TMP36:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK39-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP36]], 0 +// CHECK39-NEXT: [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK39-NEXT: store i8* null, i8** [[TMP22]], align 4 +// CHECK39-NEXT: [[TMP23:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK39-NEXT: [[TMP24:%.*]] = bitcast i8** [[TMP23]] to i32** +// CHECK39-NEXT: store i32* [[VLA]], i32** [[TMP24]], align 4 +// CHECK39-NEXT: [[TMP25:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK39-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i32** +// CHECK39-NEXT: store i32* [[VLA]], i32** [[TMP26]], align 4 +// CHECK39-NEXT: store i64 [[TMP7]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes, i32 0, i32 3), align 4 +// CHECK39-NEXT: [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 +// CHECK39-NEXT: store i8* null, i8** [[TMP27]], align 4 +// CHECK39-NEXT: [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK39-NEXT: [[TMP29:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 
+// CHECK39-NEXT: [[TMP30:%.*]] = load i32, i32* [[N]], align 4 +// CHECK39-NEXT: store i32 [[TMP30]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK39-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK39-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP31]], 0 // CHECK39-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK39-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK39-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK39-NEXT: [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK39-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP37]], 1 -// CHECK39-NEXT: [[TMP38:%.*]] = zext i32 [[ADD]] to i64 -// CHECK39-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[TMP38]]) -// CHECK39-NEXT: [[TMP39:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l192.region_id, i32 4, i8** [[TMP32]], i8** [[TMP33]], i64* [[TMP34]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) -// CHECK39-NEXT: [[TMP40:%.*]] = icmp ne i32 [[TMP39]], 0 -// CHECK39-NEXT: br i1 [[TMP40]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK39-NEXT: [[TMP32:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK39-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP32]], 1 +// CHECK39-NEXT: [[TMP33:%.*]] = zext i32 [[ADD]] to i64 +// CHECK39-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[TMP33]]) +// CHECK39-NEXT: [[TMP34:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l192.region_id, i32 4, i8** [[TMP28]], i8** [[TMP29]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK39-NEXT: [[TMP35:%.*]] = icmp ne i32 [[TMP34]], 0 +// CHECK39-NEXT: br i1 [[TMP35]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK39: omp_offload.failed: // CHECK39-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l192(i32 [[TMP3]], i32 [[TMP5]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK39-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -8182,10 +8043,10 @@ // CHECK39-NEXT: [[TMP41:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 // CHECK39-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP41]]) // CHECK39-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK39-NEXT: [[TMP42:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK39-NEXT: call void @llvm.stackrestore(i8* [[TMP42]]) -// CHECK39-NEXT: [[TMP43:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK39-NEXT: ret i32 [[TMP43]] +// CHECK39-NEXT: [[TMP37:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK39-NEXT: call void @llvm.stackrestore(i8* [[TMP37]]) +// CHECK39-NEXT: [[TMP38:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK39-NEXT: ret i32 [[TMP38]] // // // CHECK39-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l192 @@ -8407,7 +8268,7 @@ // CHECK39-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK39-NEXT: [[TMP21:%.*]] = load i32, i32* [[TE]], align 4 // CHECK39-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// 
CHECK39-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l181.region_id, i32 3, i8** [[TMP19]], i8** [[TMP20]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.2, i32 0, i32 0), i8** null, i8** null, i32 [[TMP21]], i32 1) +// CHECK39-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l181.region_id, i32 3, i8** [[TMP19]], i8** [[TMP20]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.2, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.3, i32 0, i32 0), i8** null, i8** null, i32 [[TMP21]], i32 1) // CHECK39-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0 // CHECK39-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK39: omp_offload.failed: @@ -8529,7 +8390,6 @@ // CHECK40-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [4 x i8*], align 4 // CHECK40-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [4 x i8*], align 4 // CHECK40-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [4 x i8*], align 4 -// CHECK40-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [4 x i64], align 4 // CHECK40-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK40-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK40-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -8556,57 +8416,49 @@ // CHECK40-NEXT: [[TMP10:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK40-NEXT: [[TMP11:%.*]] = bitcast i8** [[TMP10]] to i32* // CHECK40-NEXT: store i32 [[TMP3]], i32* [[TMP11]], align 4 -// CHECK40-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK40-NEXT: store i64 4, i64* [[TMP12]], align 4 -// CHECK40-NEXT: [[TMP13:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK40-NEXT: store i8* null, i8** [[TMP13]], align 4 -// CHECK40-NEXT: [[TMP14:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK40-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32* -// CHECK40-NEXT: store i32 [[TMP5]], i32* [[TMP15]], align 4 -// CHECK40-NEXT: [[TMP16:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK40-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32* -// CHECK40-NEXT: store i32 [[TMP5]], i32* [[TMP17]], align 4 -// CHECK40-NEXT: [[TMP18:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK40-NEXT: store i64 4, i64* [[TMP18]], align 4 -// CHECK40-NEXT: [[TMP19:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK40-NEXT: store i8* null, i8** [[TMP19]], align 4 -// CHECK40-NEXT: [[TMP20:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK40-NEXT: [[TMP12:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK40-NEXT: store i8* null, i8** [[TMP12]], align 4 +// CHECK40-NEXT: [[TMP13:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK40-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i32* +// CHECK40-NEXT: store i32 [[TMP5]], i32* [[TMP14]], align 4 +// CHECK40-NEXT: 
[[TMP15:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK40-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i32* +// CHECK40-NEXT: store i32 [[TMP5]], i32* [[TMP16]], align 4 +// CHECK40-NEXT: [[TMP17:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK40-NEXT: store i8* null, i8** [[TMP17]], align 4 +// CHECK40-NEXT: [[TMP18:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK40-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32* +// CHECK40-NEXT: store i32 [[TMP0]], i32* [[TMP19]], align 4 +// CHECK40-NEXT: [[TMP20:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK40-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32* // CHECK40-NEXT: store i32 [[TMP0]], i32* [[TMP21]], align 4 -// CHECK40-NEXT: [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK40-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i32* -// CHECK40-NEXT: store i32 [[TMP0]], i32* [[TMP23]], align 4 -// CHECK40-NEXT: [[TMP24:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK40-NEXT: store i64 4, i64* [[TMP24]], align 4 -// CHECK40-NEXT: [[TMP25:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK40-NEXT: store i8* null, i8** [[TMP25]], align 4 -// CHECK40-NEXT: [[TMP26:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK40-NEXT: [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i32** -// CHECK40-NEXT: store i32* [[VLA]], i32** [[TMP27]], align 4 -// CHECK40-NEXT: [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK40-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i32** -// CHECK40-NEXT: store i32* [[VLA]], i32** [[TMP29]], align 4 -// CHECK40-NEXT: [[TMP30:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK40-NEXT: store i64 [[TMP7]], i64* [[TMP30]], align 4 -// CHECK40-NEXT: [[TMP31:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 -// CHECK40-NEXT: store i8* null, i8** [[TMP31]], align 4 -// CHECK40-NEXT: [[TMP32:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK40-NEXT: [[TMP33:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK40-NEXT: [[TMP34:%.*]] = getelementptr inbounds [4 x i64], [4 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK40-NEXT: [[TMP35:%.*]] = load i32, i32* [[N]], align 4 -// CHECK40-NEXT: store i32 [[TMP35]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK40-NEXT: [[TMP36:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK40-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP36]], 0 +// CHECK40-NEXT: [[TMP22:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK40-NEXT: store i8* null, i8** [[TMP22]], align 4 +// CHECK40-NEXT: [[TMP23:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK40-NEXT: [[TMP24:%.*]] = bitcast i8** [[TMP23]] to i32** +// CHECK40-NEXT: store i32* [[VLA]], i32** [[TMP24]], align 4 +// CHECK40-NEXT: [[TMP25:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK40-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i32** +// CHECK40-NEXT: store i32* [[VLA]], 
i32** [[TMP26]], align 4 +// CHECK40-NEXT: store i64 [[TMP7]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes, i32 0, i32 3), align 4 +// CHECK40-NEXT: [[TMP27:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 +// CHECK40-NEXT: store i8* null, i8** [[TMP27]], align 4 +// CHECK40-NEXT: [[TMP28:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK40-NEXT: [[TMP29:%.*]] = getelementptr inbounds [4 x i8*], [4 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK40-NEXT: [[TMP30:%.*]] = load i32, i32* [[N]], align 4 +// CHECK40-NEXT: store i32 [[TMP30]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK40-NEXT: [[TMP31:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK40-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP31]], 0 // CHECK40-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK40-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK40-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK40-NEXT: [[TMP37:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK40-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP37]], 1 -// CHECK40-NEXT: [[TMP38:%.*]] = zext i32 [[ADD]] to i64 -// CHECK40-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[TMP38]]) -// CHECK40-NEXT: [[TMP39:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l192.region_id, i32 4, i8** [[TMP32]], i8** [[TMP33]], i64* [[TMP34]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) -// CHECK40-NEXT: [[TMP40:%.*]] = icmp ne i32 [[TMP39]], 0 -// CHECK40-NEXT: br i1 [[TMP40]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK40-NEXT: [[TMP32:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK40-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP32]], 1 +// CHECK40-NEXT: [[TMP33:%.*]] = zext i32 [[ADD]] to i64 +// CHECK40-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[TMP33]]) +// CHECK40-NEXT: [[TMP34:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l192.region_id, i32 4, i8** [[TMP28]], i8** [[TMP29]], i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([4 x i64], [4 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK40-NEXT: [[TMP35:%.*]] = icmp ne i32 [[TMP34]], 0 +// CHECK40-NEXT: br i1 [[TMP35]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK40: omp_offload.failed: // CHECK40-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l192(i32 [[TMP3]], i32 [[TMP5]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK40-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -8614,10 +8466,10 @@ // CHECK40-NEXT: [[TMP41:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 // CHECK40-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP41]]) // CHECK40-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK40-NEXT: [[TMP42:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK40-NEXT: call void @llvm.stackrestore(i8* [[TMP42]]) -// CHECK40-NEXT: [[TMP43:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK40-NEXT: ret i32 [[TMP43]] +// CHECK40-NEXT: [[TMP37:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK40-NEXT: 
call void @llvm.stackrestore(i8* [[TMP37]]) +// CHECK40-NEXT: [[TMP38:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK40-NEXT: ret i32 [[TMP38]] // // // CHECK40-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l192 @@ -8839,7 +8691,7 @@ // CHECK40-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK40-NEXT: [[TMP21:%.*]] = load i32, i32* [[TE]], align 4 // CHECK40-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK40-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l181.region_id, i32 3, i8** [[TMP19]], i8** [[TMP20]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.2, i32 0, i32 0), i8** null, i8** null, i32 [[TMP21]], i32 1) +// CHECK40-NEXT: [[TMP22:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l181.region_id, i32 3, i8** [[TMP19]], i8** [[TMP20]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.2, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.3, i32 0, i32 0), i8** null, i8** null, i32 [[TMP21]], i32 1) // CHECK40-NEXT: [[TMP23:%.*]] = icmp ne i32 [[TMP22]], 0 // CHECK40-NEXT: br i1 [[TMP23]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK40: omp_offload.failed: diff --git a/clang/test/OpenMP/teams_distribute_simd_collapse_codegen.cpp b/clang/test/OpenMP/teams_distribute_simd_collapse_codegen.cpp --- a/clang/test/OpenMP/teams_distribute_simd_collapse_codegen.cpp +++ b/clang/test/OpenMP/teams_distribute_simd_collapse_codegen.cpp @@ -29,7 +29,7 @@ #pragma omp teams distribute simd collapse(2) for(int i = 0; i < X; i++) { for(int j = 0; j < Y; j++) { - a[i][j] = (T)0; + a[i][j] = (T)0; } } @@ -998,7 +998,6 @@ // CHECK9-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 8 // CHECK9-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 8 // CHECK9-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 8 -// CHECK9-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 8 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK9-NEXT: [[_TMP2:%.*]] = alloca i32, align 4 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 @@ -1035,74 +1034,64 @@ // CHECK9-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK9-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64* // CHECK9-NEXT: store i64 [[TMP7]], i64* [[TMP15]], align 8 -// CHECK9-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK9-NEXT: store i64 4, i64* [[TMP16]], align 8 -// CHECK9-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK9-NEXT: store i8* null, i8** [[TMP17]], align 8 -// CHECK9-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK9-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i64* -// CHECK9-NEXT: store i64 [[TMP9]], i64* [[TMP19]], align 8 -// CHECK9-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK9-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i64* -// 
CHECK9-NEXT: store i64 [[TMP9]], i64* [[TMP21]], align 8 -// CHECK9-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK9-NEXT: store i64 4, i64* [[TMP22]], align 8 -// CHECK9-NEXT: [[TMP23:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK9-NEXT: store i8* null, i8** [[TMP23]], align 8 -// CHECK9-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK9-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK9-NEXT: store i8* null, i8** [[TMP16]], align 8 +// CHECK9-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK9-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i64* +// CHECK9-NEXT: store i64 [[TMP9]], i64* [[TMP18]], align 8 +// CHECK9-NEXT: [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK9-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i64* +// CHECK9-NEXT: store i64 [[TMP9]], i64* [[TMP20]], align 8 +// CHECK9-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK9-NEXT: store i8* null, i8** [[TMP21]], align 8 +// CHECK9-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK9-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i64* +// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP23]], align 8 +// CHECK9-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK9-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i64* // CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP25]], align 8 -// CHECK9-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK9-NEXT: [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i64* -// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP27]], align 8 -// CHECK9-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK9-NEXT: store i64 8, i64* [[TMP28]], align 8 -// CHECK9-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK9-NEXT: store i8* null, i8** [[TMP29]], align 8 -// CHECK9-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK9-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i64* -// CHECK9-NEXT: store i64 [[TMP3]], i64* [[TMP31]], align 8 -// CHECK9-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK9-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i64* -// CHECK9-NEXT: store i64 [[TMP3]], i64* [[TMP33]], align 8 -// CHECK9-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK9-NEXT: store i64 8, i64* [[TMP34]], align 8 -// CHECK9-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 -// CHECK9-NEXT: store i8* null, i8** [[TMP35]], align 8 -// CHECK9-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK9-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i32** -// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP37]], align 8 -// CHECK9-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, 
i32 4 -// CHECK9-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i32** -// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP39]], align 8 -// CHECK9-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK9-NEXT: store i64 [[TMP11]], i64* [[TMP40]], align 8 -// CHECK9-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 -// CHECK9-NEXT: store i8* null, i8** [[TMP41]], align 8 -// CHECK9-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP43:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP44:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP45:%.*]] = load i32, i32* [[N]], align 4 -// CHECK9-NEXT: store i32 [[TMP45]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK9-NEXT: [[TMP46:%.*]] = load i32, i32* [[M]], align 4 -// CHECK9-NEXT: store i32 [[TMP46]], i32* [[DOTCAPTURE_EXPR_3]], align 4 -// CHECK9-NEXT: [[TMP47:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP47]], 0 +// CHECK9-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK9-NEXT: store i8* null, i8** [[TMP26]], align 8 +// CHECK9-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK9-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i64* +// CHECK9-NEXT: store i64 [[TMP3]], i64* [[TMP28]], align 8 +// CHECK9-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK9-NEXT: [[TMP30:%.*]] = bitcast i8** [[TMP29]] to i64* +// CHECK9-NEXT: store i64 [[TMP3]], i64* [[TMP30]], align 8 +// CHECK9-NEXT: [[TMP31:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 +// CHECK9-NEXT: store i8* null, i8** [[TMP31]], align 8 +// CHECK9-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK9-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i32** +// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP33]], align 8 +// CHECK9-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK9-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i32** +// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP35]], align 8 +// CHECK9-NEXT: store i64 [[TMP11]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes, i32 0, i32 4), align 8 +// CHECK9-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 +// CHECK9-NEXT: store i8* null, i8** [[TMP36]], align 8 +// CHECK9-NEXT: [[TMP37:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP39:%.*]] = load i32, i32* [[N]], align 4 +// CHECK9-NEXT: store i32 [[TMP39]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK9-NEXT: [[TMP40:%.*]] = load i32, i32* [[M]], align 4 +// CHECK9-NEXT: store i32 [[TMP40]], i32* [[DOTCAPTURE_EXPR_3]], align 4 +// CHECK9-NEXT: [[TMP41:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP41]], 0 // CHECK9-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK9-NEXT: [[CONV5:%.*]] = sext 
i32 [[DIV]] to i64 -// CHECK9-NEXT: [[TMP48:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4 -// CHECK9-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP48]], 0 +// CHECK9-NEXT: [[TMP42:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4 +// CHECK9-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP42]], 0 // CHECK9-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1 // CHECK9-NEXT: [[CONV8:%.*]] = sext i32 [[DIV7]] to i64 // CHECK9-NEXT: [[MUL:%.*]] = mul nsw i64 [[CONV5]], [[CONV8]] // CHECK9-NEXT: [[SUB9:%.*]] = sub nsw i64 [[MUL]], 1 // CHECK9-NEXT: store i64 [[SUB9]], i64* [[DOTCAPTURE_EXPR_4]], align 8 -// CHECK9-NEXT: [[TMP49:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_4]], align 8 -// CHECK9-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP49]], 1 +// CHECK9-NEXT: [[TMP43:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_4]], align 8 +// CHECK9-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP43]], 1 // CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[ADD]]) -// CHECK9-NEXT: [[TMP50:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l83.region_id, i32 5, i8** [[TMP42]], i8** [[TMP43]], i64* [[TMP44]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) -// CHECK9-NEXT: [[TMP51:%.*]] = icmp ne i32 [[TMP50]], 0 -// CHECK9-NEXT: br i1 [[TMP51]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK9-NEXT: [[TMP44:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l83.region_id, i32 5, i8** [[TMP37]], i8** [[TMP38]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK9-NEXT: [[TMP45:%.*]] = icmp ne i32 [[TMP44]], 0 +// CHECK9-NEXT: br i1 [[TMP45]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK9: omp_offload.failed: // CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l83(i64 [[TMP7]], i64 [[TMP9]], i64 [[TMP1]], i64 [[TMP3]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -1110,10 +1099,10 @@ // CHECK9-NEXT: [[TMP52:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 // CHECK9-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10ELi2EEiT_(i32 noundef signext [[TMP52]]) // CHECK9-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK9-NEXT: [[TMP53:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK9-NEXT: call void @llvm.stackrestore(i8* [[TMP53]]) -// CHECK9-NEXT: [[TMP54:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK9-NEXT: ret i32 [[TMP54]] +// CHECK9-NEXT: [[TMP47:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK9-NEXT: call void @llvm.stackrestore(i8* [[TMP47]]) +// CHECK9-NEXT: [[TMP48:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK9-NEXT: ret i32 [[TMP48]] // // // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l83 @@ -1326,7 +1315,7 @@ // CHECK9-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK9-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 20) -// CHECK9-NEXT: [[TMP7:%.*]] = call i32 
@__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l69.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.2, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK9-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l69.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.2, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.3, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) // CHECK9-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK9-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK9: omp_offload.failed: @@ -1458,7 +1447,6 @@ // CHECK10-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 8 // CHECK10-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 8 // CHECK10-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 8 -// CHECK10-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 8 // CHECK10-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK10-NEXT: [[_TMP2:%.*]] = alloca i32, align 4 // CHECK10-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 @@ -1495,74 +1483,64 @@ // CHECK10-NEXT: [[TMP14:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK10-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64* // CHECK10-NEXT: store i64 [[TMP7]], i64* [[TMP15]], align 8 -// CHECK10-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK10-NEXT: store i64 4, i64* [[TMP16]], align 8 -// CHECK10-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK10-NEXT: store i8* null, i8** [[TMP17]], align 8 -// CHECK10-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK10-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i64* -// CHECK10-NEXT: store i64 [[TMP9]], i64* [[TMP19]], align 8 -// CHECK10-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK10-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i64* -// CHECK10-NEXT: store i64 [[TMP9]], i64* [[TMP21]], align 8 -// CHECK10-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK10-NEXT: store i64 4, i64* [[TMP22]], align 8 -// CHECK10-NEXT: [[TMP23:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK10-NEXT: store i8* null, i8** [[TMP23]], align 8 -// CHECK10-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK10-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK10-NEXT: store i8* null, i8** [[TMP16]], align 8 +// CHECK10-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK10-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i64* +// CHECK10-NEXT: store i64 [[TMP9]], i64* [[TMP18]], align 8 +// CHECK10-NEXT: [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], 
i32 0, i32 1 +// CHECK10-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i64* +// CHECK10-NEXT: store i64 [[TMP9]], i64* [[TMP20]], align 8 +// CHECK10-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK10-NEXT: store i8* null, i8** [[TMP21]], align 8 +// CHECK10-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK10-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to i64* +// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP23]], align 8 +// CHECK10-NEXT: [[TMP24:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK10-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to i64* // CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP25]], align 8 -// CHECK10-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK10-NEXT: [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i64* -// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP27]], align 8 -// CHECK10-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK10-NEXT: store i64 8, i64* [[TMP28]], align 8 -// CHECK10-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK10-NEXT: store i8* null, i8** [[TMP29]], align 8 -// CHECK10-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK10-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i64* -// CHECK10-NEXT: store i64 [[TMP3]], i64* [[TMP31]], align 8 -// CHECK10-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK10-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i64* -// CHECK10-NEXT: store i64 [[TMP3]], i64* [[TMP33]], align 8 -// CHECK10-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK10-NEXT: store i64 8, i64* [[TMP34]], align 8 -// CHECK10-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 -// CHECK10-NEXT: store i8* null, i8** [[TMP35]], align 8 -// CHECK10-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK10-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i32** -// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP37]], align 8 -// CHECK10-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK10-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i32** -// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP39]], align 8 -// CHECK10-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK10-NEXT: store i64 [[TMP11]], i64* [[TMP40]], align 8 -// CHECK10-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 -// CHECK10-NEXT: store i8* null, i8** [[TMP41]], align 8 -// CHECK10-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP43:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP44:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP45:%.*]] = load i32, i32* [[N]], align 4 -// CHECK10-NEXT: store i32 [[TMP45]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK10-NEXT: 
[[TMP46:%.*]] = load i32, i32* [[M]], align 4 -// CHECK10-NEXT: store i32 [[TMP46]], i32* [[DOTCAPTURE_EXPR_3]], align 4 -// CHECK10-NEXT: [[TMP47:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK10-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP47]], 0 +// CHECK10-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK10-NEXT: store i8* null, i8** [[TMP26]], align 8 +// CHECK10-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK10-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i64* +// CHECK10-NEXT: store i64 [[TMP3]], i64* [[TMP28]], align 8 +// CHECK10-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK10-NEXT: [[TMP30:%.*]] = bitcast i8** [[TMP29]] to i64* +// CHECK10-NEXT: store i64 [[TMP3]], i64* [[TMP30]], align 8 +// CHECK10-NEXT: [[TMP31:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 +// CHECK10-NEXT: store i8* null, i8** [[TMP31]], align 8 +// CHECK10-NEXT: [[TMP32:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK10-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i32** +// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP33]], align 8 +// CHECK10-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK10-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i32** +// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP35]], align 8 +// CHECK10-NEXT: store i64 [[TMP11]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes, i32 0, i32 4), align 8 +// CHECK10-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 +// CHECK10-NEXT: store i8* null, i8** [[TMP36]], align 8 +// CHECK10-NEXT: [[TMP37:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP38:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP39:%.*]] = load i32, i32* [[N]], align 4 +// CHECK10-NEXT: store i32 [[TMP39]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK10-NEXT: [[TMP40:%.*]] = load i32, i32* [[M]], align 4 +// CHECK10-NEXT: store i32 [[TMP40]], i32* [[DOTCAPTURE_EXPR_3]], align 4 +// CHECK10-NEXT: [[TMP41:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK10-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP41]], 0 // CHECK10-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK10-NEXT: [[CONV5:%.*]] = sext i32 [[DIV]] to i64 -// CHECK10-NEXT: [[TMP48:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4 -// CHECK10-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP48]], 0 +// CHECK10-NEXT: [[TMP42:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_3]], align 4 +// CHECK10-NEXT: [[SUB6:%.*]] = sub nsw i32 [[TMP42]], 0 // CHECK10-NEXT: [[DIV7:%.*]] = sdiv i32 [[SUB6]], 1 // CHECK10-NEXT: [[CONV8:%.*]] = sext i32 [[DIV7]] to i64 // CHECK10-NEXT: [[MUL:%.*]] = mul nsw i64 [[CONV5]], [[CONV8]] // CHECK10-NEXT: [[SUB9:%.*]] = sub nsw i64 [[MUL]], 1 // CHECK10-NEXT: store i64 [[SUB9]], i64* [[DOTCAPTURE_EXPR_4]], align 8 -// CHECK10-NEXT: [[TMP49:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_4]], align 8 -// CHECK10-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP49]], 1 +// CHECK10-NEXT: [[TMP43:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_4]], align 8 +// CHECK10-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP43]], 1 // CHECK10-NEXT: call void 
@__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[ADD]]) -// CHECK10-NEXT: [[TMP50:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l83.region_id, i32 5, i8** [[TMP42]], i8** [[TMP43]], i64* [[TMP44]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) -// CHECK10-NEXT: [[TMP51:%.*]] = icmp ne i32 [[TMP50]], 0 -// CHECK10-NEXT: br i1 [[TMP51]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK10-NEXT: [[TMP44:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l83.region_id, i32 5, i8** [[TMP37]], i8** [[TMP38]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK10-NEXT: [[TMP45:%.*]] = icmp ne i32 [[TMP44]], 0 +// CHECK10-NEXT: br i1 [[TMP45]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK10: omp_offload.failed: // CHECK10-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l83(i64 [[TMP7]], i64 [[TMP9]], i64 [[TMP1]], i64 [[TMP3]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK10-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -1570,10 +1548,10 @@ // CHECK10-NEXT: [[TMP52:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 // CHECK10-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10ELi2EEiT_(i32 noundef signext [[TMP52]]) // CHECK10-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK10-NEXT: [[TMP53:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK10-NEXT: call void @llvm.stackrestore(i8* [[TMP53]]) -// CHECK10-NEXT: [[TMP54:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK10-NEXT: ret i32 [[TMP54]] +// CHECK10-NEXT: [[TMP47:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK10-NEXT: call void @llvm.stackrestore(i8* [[TMP47]]) +// CHECK10-NEXT: [[TMP48:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK10-NEXT: ret i32 [[TMP48]] // // // CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l83 @@ -1786,7 +1764,7 @@ // CHECK10-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK10-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 20) -// CHECK10-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l69.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.2, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK10-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l69.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.2, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.3, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) // CHECK10-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 
0 // CHECK10-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK10: omp_offload.failed: @@ -1918,7 +1896,6 @@ // CHECK11-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 4 // CHECK11-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 4 // CHECK11-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 4 -// CHECK11-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 4 // CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK11-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 @@ -1952,74 +1929,64 @@ // CHECK11-NEXT: [[TMP13:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK11-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i32* // CHECK11-NEXT: store i32 [[TMP5]], i32* [[TMP14]], align 4 -// CHECK11-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK11-NEXT: store i64 4, i64* [[TMP15]], align 4 -// CHECK11-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK11-NEXT: store i8* null, i8** [[TMP16]], align 4 -// CHECK11-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK11-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32* -// CHECK11-NEXT: store i32 [[TMP7]], i32* [[TMP18]], align 4 -// CHECK11-NEXT: [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK11-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i32* -// CHECK11-NEXT: store i32 [[TMP7]], i32* [[TMP20]], align 4 -// CHECK11-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK11-NEXT: store i64 4, i64* [[TMP21]], align 4 -// CHECK11-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK11-NEXT: store i8* null, i8** [[TMP22]], align 4 -// CHECK11-NEXT: [[TMP23:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK11-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK11-NEXT: store i8* null, i8** [[TMP15]], align 4 +// CHECK11-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK11-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32* +// CHECK11-NEXT: store i32 [[TMP7]], i32* [[TMP17]], align 4 +// CHECK11-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK11-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32* +// CHECK11-NEXT: store i32 [[TMP7]], i32* [[TMP19]], align 4 +// CHECK11-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK11-NEXT: store i8* null, i8** [[TMP20]], align 4 +// CHECK11-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK11-NEXT: [[TMP22:%.*]] = bitcast i8** [[TMP21]] to i32* +// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP22]], align 4 +// CHECK11-NEXT: [[TMP23:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK11-NEXT: [[TMP24:%.*]] = bitcast i8** [[TMP23]] to i32* // CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP24]], align 4 -// CHECK11-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* 
[[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK11-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i32* -// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP26]], align 4 -// CHECK11-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK11-NEXT: store i64 4, i64* [[TMP27]], align 4 -// CHECK11-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK11-NEXT: store i8* null, i8** [[TMP28]], align 4 -// CHECK11-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK11-NEXT: [[TMP30:%.*]] = bitcast i8** [[TMP29]] to i32* -// CHECK11-NEXT: store i32 [[TMP1]], i32* [[TMP30]], align 4 -// CHECK11-NEXT: [[TMP31:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK11-NEXT: [[TMP32:%.*]] = bitcast i8** [[TMP31]] to i32* -// CHECK11-NEXT: store i32 [[TMP1]], i32* [[TMP32]], align 4 -// CHECK11-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK11-NEXT: store i64 4, i64* [[TMP33]], align 4 -// CHECK11-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 -// CHECK11-NEXT: store i8* null, i8** [[TMP34]], align 4 -// CHECK11-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK11-NEXT: [[TMP36:%.*]] = bitcast i8** [[TMP35]] to i32** -// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP36]], align 4 -// CHECK11-NEXT: [[TMP37:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK11-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i32** -// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP38]], align 4 -// CHECK11-NEXT: [[TMP39:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK11-NEXT: store i64 [[TMP10]], i64* [[TMP39]], align 4 -// CHECK11-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 -// CHECK11-NEXT: store i8* null, i8** [[TMP40]], align 4 -// CHECK11-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP43:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP44:%.*]] = load i32, i32* [[N]], align 4 -// CHECK11-NEXT: store i32 [[TMP44]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK11-NEXT: [[TMP45:%.*]] = load i32, i32* [[M]], align 4 -// CHECK11-NEXT: store i32 [[TMP45]], i32* [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK11-NEXT: [[TMP46:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP46]], 0 +// CHECK11-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK11-NEXT: store i8* null, i8** [[TMP25]], align 4 +// CHECK11-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK11-NEXT: [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i32* +// CHECK11-NEXT: store i32 [[TMP1]], i32* [[TMP27]], align 4 +// CHECK11-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK11-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i32* +// CHECK11-NEXT: store i32 
[[TMP1]], i32* [[TMP29]], align 4 +// CHECK11-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 +// CHECK11-NEXT: store i8* null, i8** [[TMP30]], align 4 +// CHECK11-NEXT: [[TMP31:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK11-NEXT: [[TMP32:%.*]] = bitcast i8** [[TMP31]] to i32** +// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP32]], align 4 +// CHECK11-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK11-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i32** +// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP34]], align 4 +// CHECK11-NEXT: store i64 [[TMP10]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes, i32 0, i32 4), align 4 +// CHECK11-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 +// CHECK11-NEXT: store i8* null, i8** [[TMP35]], align 4 +// CHECK11-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP37:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP38:%.*]] = load i32, i32* [[N]], align 4 +// CHECK11-NEXT: store i32 [[TMP38]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK11-NEXT: [[TMP39:%.*]] = load i32, i32* [[M]], align 4 +// CHECK11-NEXT: store i32 [[TMP39]], i32* [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK11-NEXT: [[TMP40:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP40]], 0 // CHECK11-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK11-NEXT: [[CONV:%.*]] = sext i32 [[DIV]] to i64 -// CHECK11-NEXT: [[TMP47:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK11-NEXT: [[SUB4:%.*]] = sub nsw i32 [[TMP47]], 0 +// CHECK11-NEXT: [[TMP41:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK11-NEXT: [[SUB4:%.*]] = sub nsw i32 [[TMP41]], 0 // CHECK11-NEXT: [[DIV5:%.*]] = sdiv i32 [[SUB4]], 1 // CHECK11-NEXT: [[CONV6:%.*]] = sext i32 [[DIV5]] to i64 // CHECK11-NEXT: [[MUL:%.*]] = mul nsw i64 [[CONV]], [[CONV6]] // CHECK11-NEXT: [[SUB7:%.*]] = sub nsw i64 [[MUL]], 1 // CHECK11-NEXT: store i64 [[SUB7]], i64* [[DOTCAPTURE_EXPR_3]], align 8 -// CHECK11-NEXT: [[TMP48:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8 -// CHECK11-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP48]], 1 +// CHECK11-NEXT: [[TMP42:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8 +// CHECK11-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP42]], 1 // CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[ADD]]) -// CHECK11-NEXT: [[TMP49:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l83.region_id, i32 5, i8** [[TMP41]], i8** [[TMP42]], i64* [[TMP43]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) -// CHECK11-NEXT: [[TMP50:%.*]] = icmp ne i32 [[TMP49]], 0 -// CHECK11-NEXT: br i1 [[TMP50]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK11-NEXT: [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l83.region_id, i32 5, i8** [[TMP36]], i8** [[TMP37]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr 
inbounds ([5 x i64], [5 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK11-NEXT: [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0 +// CHECK11-NEXT: br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK11: omp_offload.failed: // CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l83(i32 [[TMP5]], i32 [[TMP7]], i32 [[TMP0]], i32 [[TMP1]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -2027,10 +1994,10 @@ // CHECK11-NEXT: [[TMP51:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 // CHECK11-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10ELi2EEiT_(i32 noundef [[TMP51]]) // CHECK11-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK11-NEXT: [[TMP52:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK11-NEXT: call void @llvm.stackrestore(i8* [[TMP52]]) -// CHECK11-NEXT: [[TMP53:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK11-NEXT: ret i32 [[TMP53]] +// CHECK11-NEXT: [[TMP46:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK11-NEXT: call void @llvm.stackrestore(i8* [[TMP46]]) +// CHECK11-NEXT: [[TMP47:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK11-NEXT: ret i32 [[TMP47]] // // // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l83 @@ -2239,7 +2206,7 @@ // CHECK11-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK11-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 20) -// CHECK11-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l69.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.2, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK11-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l69.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.2, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.3, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) // CHECK11-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK11-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK11: omp_offload.failed: @@ -2369,7 +2336,6 @@ // CHECK12-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [5 x i8*], align 4 // CHECK12-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [5 x i8*], align 4 // CHECK12-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [5 x i8*], align 4 -// CHECK12-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [5 x i64], align 4 // CHECK12-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK12-NEXT: [[_TMP1:%.*]] = alloca i32, align 4 // CHECK12-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 @@ -2403,74 +2369,64 @@ // CHECK12-NEXT: [[TMP13:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK12-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i32* // CHECK12-NEXT: store i32 [[TMP5]], i32* [[TMP14]], align 4 -// CHECK12-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i64], 
[5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK12-NEXT: store i64 4, i64* [[TMP15]], align 4 -// CHECK12-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK12-NEXT: store i8* null, i8** [[TMP16]], align 4 -// CHECK12-NEXT: [[TMP17:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK12-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to i32* -// CHECK12-NEXT: store i32 [[TMP7]], i32* [[TMP18]], align 4 -// CHECK12-NEXT: [[TMP19:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK12-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to i32* -// CHECK12-NEXT: store i32 [[TMP7]], i32* [[TMP20]], align 4 -// CHECK12-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK12-NEXT: store i64 4, i64* [[TMP21]], align 4 -// CHECK12-NEXT: [[TMP22:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK12-NEXT: store i8* null, i8** [[TMP22]], align 4 -// CHECK12-NEXT: [[TMP23:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK12-NEXT: [[TMP15:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK12-NEXT: store i8* null, i8** [[TMP15]], align 4 +// CHECK12-NEXT: [[TMP16:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK12-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32* +// CHECK12-NEXT: store i32 [[TMP7]], i32* [[TMP17]], align 4 +// CHECK12-NEXT: [[TMP18:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK12-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32* +// CHECK12-NEXT: store i32 [[TMP7]], i32* [[TMP19]], align 4 +// CHECK12-NEXT: [[TMP20:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK12-NEXT: store i8* null, i8** [[TMP20]], align 4 +// CHECK12-NEXT: [[TMP21:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK12-NEXT: [[TMP22:%.*]] = bitcast i8** [[TMP21]] to i32* +// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP22]], align 4 +// CHECK12-NEXT: [[TMP23:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK12-NEXT: [[TMP24:%.*]] = bitcast i8** [[TMP23]] to i32* // CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP24]], align 4 -// CHECK12-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK12-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i32* -// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP26]], align 4 -// CHECK12-NEXT: [[TMP27:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK12-NEXT: store i64 4, i64* [[TMP27]], align 4 -// CHECK12-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK12-NEXT: store i8* null, i8** [[TMP28]], align 4 -// CHECK12-NEXT: [[TMP29:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK12-NEXT: [[TMP30:%.*]] = bitcast i8** [[TMP29]] to i32* -// CHECK12-NEXT: store i32 [[TMP1]], i32* [[TMP30]], align 4 -// CHECK12-NEXT: [[TMP31:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK12-NEXT: [[TMP32:%.*]] = bitcast i8** [[TMP31]] to i32* -// 
CHECK12-NEXT: store i32 [[TMP1]], i32* [[TMP32]], align 4 -// CHECK12-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK12-NEXT: store i64 4, i64* [[TMP33]], align 4 -// CHECK12-NEXT: [[TMP34:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 -// CHECK12-NEXT: store i8* null, i8** [[TMP34]], align 4 -// CHECK12-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK12-NEXT: [[TMP36:%.*]] = bitcast i8** [[TMP35]] to i32** -// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP36]], align 4 -// CHECK12-NEXT: [[TMP37:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK12-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i32** -// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP38]], align 4 -// CHECK12-NEXT: [[TMP39:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK12-NEXT: store i64 [[TMP10]], i64* [[TMP39]], align 4 -// CHECK12-NEXT: [[TMP40:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 -// CHECK12-NEXT: store i8* null, i8** [[TMP40]], align 4 -// CHECK12-NEXT: [[TMP41:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP42:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP43:%.*]] = getelementptr inbounds [5 x i64], [5 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP44:%.*]] = load i32, i32* [[N]], align 4 -// CHECK12-NEXT: store i32 [[TMP44]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK12-NEXT: [[TMP45:%.*]] = load i32, i32* [[M]], align 4 -// CHECK12-NEXT: store i32 [[TMP45]], i32* [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK12-NEXT: [[TMP46:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK12-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP46]], 0 +// CHECK12-NEXT: [[TMP25:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK12-NEXT: store i8* null, i8** [[TMP25]], align 4 +// CHECK12-NEXT: [[TMP26:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK12-NEXT: [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i32* +// CHECK12-NEXT: store i32 [[TMP1]], i32* [[TMP27]], align 4 +// CHECK12-NEXT: [[TMP28:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK12-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i32* +// CHECK12-NEXT: store i32 [[TMP1]], i32* [[TMP29]], align 4 +// CHECK12-NEXT: [[TMP30:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 +// CHECK12-NEXT: store i8* null, i8** [[TMP30]], align 4 +// CHECK12-NEXT: [[TMP31:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK12-NEXT: [[TMP32:%.*]] = bitcast i8** [[TMP31]] to i32** +// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP32]], align 4 +// CHECK12-NEXT: [[TMP33:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK12-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i32** +// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP34]], align 4 +// CHECK12-NEXT: store i64 [[TMP10]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes, i32 0, i32 4), align 4 +// CHECK12-NEXT: [[TMP35:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 
0, i32 4 +// CHECK12-NEXT: store i8* null, i8** [[TMP35]], align 4 +// CHECK12-NEXT: [[TMP36:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP37:%.*]] = getelementptr inbounds [5 x i8*], [5 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP38:%.*]] = load i32, i32* [[N]], align 4 +// CHECK12-NEXT: store i32 [[TMP38]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK12-NEXT: [[TMP39:%.*]] = load i32, i32* [[M]], align 4 +// CHECK12-NEXT: store i32 [[TMP39]], i32* [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK12-NEXT: [[TMP40:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK12-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP40]], 0 // CHECK12-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK12-NEXT: [[CONV:%.*]] = sext i32 [[DIV]] to i64 -// CHECK12-NEXT: [[TMP47:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4 -// CHECK12-NEXT: [[SUB4:%.*]] = sub nsw i32 [[TMP47]], 0 +// CHECK12-NEXT: [[TMP41:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_2]], align 4 +// CHECK12-NEXT: [[SUB4:%.*]] = sub nsw i32 [[TMP41]], 0 // CHECK12-NEXT: [[DIV5:%.*]] = sdiv i32 [[SUB4]], 1 // CHECK12-NEXT: [[CONV6:%.*]] = sext i32 [[DIV5]] to i64 // CHECK12-NEXT: [[MUL:%.*]] = mul nsw i64 [[CONV]], [[CONV6]] // CHECK12-NEXT: [[SUB7:%.*]] = sub nsw i64 [[MUL]], 1 // CHECK12-NEXT: store i64 [[SUB7]], i64* [[DOTCAPTURE_EXPR_3]], align 8 -// CHECK12-NEXT: [[TMP48:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8 -// CHECK12-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP48]], 1 +// CHECK12-NEXT: [[TMP42:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR_3]], align 8 +// CHECK12-NEXT: [[ADD:%.*]] = add nsw i64 [[TMP42]], 1 // CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[ADD]]) -// CHECK12-NEXT: [[TMP49:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l83.region_id, i32 5, i8** [[TMP41]], i8** [[TMP42]], i64* [[TMP43]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) -// CHECK12-NEXT: [[TMP50:%.*]] = icmp ne i32 [[TMP49]], 0 -// CHECK12-NEXT: br i1 [[TMP50]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK12-NEXT: [[TMP43:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l83.region_id, i32 5, i8** [[TMP36]], i8** [[TMP37]], i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([5 x i64], [5 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK12-NEXT: [[TMP44:%.*]] = icmp ne i32 [[TMP43]], 0 +// CHECK12-NEXT: br i1 [[TMP44]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK12: omp_offload.failed: // CHECK12-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l83(i32 [[TMP5]], i32 [[TMP7]], i32 [[TMP0]], i32 [[TMP1]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK12-NEXT: br label [[OMP_OFFLOAD_CONT]] @@ -2478,10 +2434,10 @@ // CHECK12-NEXT: [[TMP51:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 // CHECK12-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10ELi2EEiT_(i32 noundef [[TMP51]]) // CHECK12-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK12-NEXT: [[TMP52:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK12-NEXT: call void @llvm.stackrestore(i8* [[TMP52]]) -// CHECK12-NEXT: [[TMP53:%.*]] = load 
i32, i32* [[RETVAL]], align 4 -// CHECK12-NEXT: ret i32 [[TMP53]] +// CHECK12-NEXT: [[TMP46:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK12-NEXT: call void @llvm.stackrestore(i8* [[TMP46]]) +// CHECK12-NEXT: [[TMP47:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK12-NEXT: ret i32 [[TMP47]] // // // CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l83 @@ -2690,7 +2646,7 @@ // CHECK12-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK12-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 20) -// CHECK12-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l69.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.2, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK12-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10ELi2EEiT__l69.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.2, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.3, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) // CHECK12-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK12-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK12: omp_offload.failed: diff --git a/clang/test/OpenMP/teams_distribute_simd_dist_schedule_codegen.cpp b/clang/test/OpenMP/teams_distribute_simd_dist_schedule_codegen.cpp --- a/clang/test/OpenMP/teams_distribute_simd_dist_schedule_codegen.cpp +++ b/clang/test/OpenMP/teams_distribute_simd_dist_schedule_codegen.cpp @@ -2104,7 +2104,6 @@ // CHECK9-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK9-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK9-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8 -// CHECK9-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 8 // CHECK9-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -2112,18 +2111,16 @@ // CHECK9-NEXT: [[DOTOFFLOAD_BASEPTRS5:%.*]] = alloca [3 x i8*], align 8 // CHECK9-NEXT: [[DOTOFFLOAD_PTRS6:%.*]] = alloca [3 x i8*], align 8 // CHECK9-NEXT: [[DOTOFFLOAD_MAPPERS7:%.*]] = alloca [3 x i8*], align 8 -// CHECK9-NEXT: [[DOTOFFLOAD_SIZES8:%.*]] = alloca [3 x i64], align 8 -// CHECK9-NEXT: [[_TMP9:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[_TMP8:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4 // CHECK9-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTCAPTURE_EXPR_11:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[N_CASTED18:%.*]] = alloca i64, align 8 -// CHECK9-NEXT: [[DOTOFFLOAD_BASEPTRS20:%.*]] = alloca [3 x i8*], align 8 -// CHECK9-NEXT: [[DOTOFFLOAD_PTRS21:%.*]] = alloca [3 x i8*], align 8 -// CHECK9-NEXT: [[DOTOFFLOAD_MAPPERS22:%.*]] = alloca [3 x i8*], align 8 -// CHECK9-NEXT: [[DOTOFFLOAD_SIZES23:%.*]] = alloca [3 x i64], align 8 -// 
CHECK9-NEXT: [[_TMP24:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTCAPTURE_EXPR_25:%.*]] = alloca i32, align 4 -// CHECK9-NEXT: [[DOTCAPTURE_EXPR_26:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[N_CASTED17:%.*]] = alloca i64, align 8 +// CHECK9-NEXT: [[DOTOFFLOAD_BASEPTRS19:%.*]] = alloca [3 x i8*], align 8 +// CHECK9-NEXT: [[DOTOFFLOAD_PTRS20:%.*]] = alloca [3 x i8*], align 8 +// CHECK9-NEXT: [[DOTOFFLOAD_MAPPERS21:%.*]] = alloca [3 x i8*], align 8 +// CHECK9-NEXT: [[_TMP22:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTCAPTURE_EXPR_23:%.*]] = alloca i32, align 4 +// CHECK9-NEXT: [[DOTCAPTURE_EXPR_24:%.*]] = alloca i32, align 4 // CHECK9-NEXT: store i32 0, i32* [[RETVAL]], align 4 // CHECK9-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 // CHECK9-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 8 @@ -2145,170 +2142,152 @@ // CHECK9-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK9-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i64* // CHECK9-NEXT: store i64 [[TMP4]], i64* [[TMP9]], align 8 -// CHECK9-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK9-NEXT: store i64 4, i64* [[TMP10]], align 8 -// CHECK9-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK9-NEXT: store i8* null, i8** [[TMP11]], align 8 -// CHECK9-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK9-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64* -// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP13]], align 8 -// CHECK9-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK9-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64* -// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP15]], align 8 -// CHECK9-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK9-NEXT: store i64 8, i64* [[TMP16]], align 8 -// CHECK9-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK9-NEXT: store i8* null, i8** [[TMP17]], align 8 -// CHECK9-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK9-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK9-NEXT: store i8* null, i8** [[TMP10]], align 8 +// CHECK9-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK9-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i64* +// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP12]], align 8 +// CHECK9-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK9-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i64* +// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP14]], align 8 +// CHECK9-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK9-NEXT: store i8* null, i8** [[TMP15]], align 8 +// CHECK9-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK9-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** +// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 8 +// CHECK9-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK9-NEXT: 
[[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** // CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 8 -// CHECK9-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK9-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** -// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 8 -// CHECK9-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK9-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 8 -// CHECK9-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK9-NEXT: store i8* null, i8** [[TMP23]], align 8 -// CHECK9-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4 -// CHECK9-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK9-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 +// CHECK9-NEXT: store i64 [[TMP5]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 2), align 8 +// CHECK9-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK9-NEXT: store i8* null, i8** [[TMP20]], align 8 +// CHECK9-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP23:%.*]] = load i32, i32* [[N]], align 4 +// CHECK9-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK9-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK9-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK9-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK9-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK9-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK9-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1 -// CHECK9-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 -// CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[TMP30]]) -// CHECK9-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l100.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) -// CHECK9-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 -// CHECK9-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK9-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK9-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], 1 +// CHECK9-NEXT: [[TMP26:%.*]] = zext i32 [[ADD]] to i64 +// CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[TMP26]]) +// CHECK9-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* 
@.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l100.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK9-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK9-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK9: omp_offload.failed: // CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l100(i64 [[TMP4]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK9: omp_offload.cont: -// CHECK9-NEXT: [[TMP33:%.*]] = load i32, i32* [[N]], align 4 +// CHECK9-NEXT: [[TMP29:%.*]] = load i32, i32* [[N]], align 4 // CHECK9-NEXT: [[CONV4:%.*]] = bitcast i64* [[N_CASTED3]] to i32* -// CHECK9-NEXT: store i32 [[TMP33]], i32* [[CONV4]], align 4 -// CHECK9-NEXT: [[TMP34:%.*]] = load i64, i64* [[N_CASTED3]], align 8 -// CHECK9-NEXT: [[TMP35:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK9-NEXT: [[TMP36:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i64* -// CHECK9-NEXT: store i64 [[TMP34]], i64* [[TMP37]], align 8 -// CHECK9-NEXT: [[TMP38:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i64* -// CHECK9-NEXT: store i64 [[TMP34]], i64* [[TMP39]], align 8 -// CHECK9-NEXT: [[TMP40:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 0 -// CHECK9-NEXT: store i64 4, i64* [[TMP40]], align 8 -// CHECK9-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 0 +// CHECK9-NEXT: store i32 [[TMP29]], i32* [[CONV4]], align 4 +// CHECK9-NEXT: [[TMP30:%.*]] = load i64, i64* [[N_CASTED3]], align 8 +// CHECK9-NEXT: [[TMP31:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK9-NEXT: [[TMP32:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i64* +// CHECK9-NEXT: store i64 [[TMP30]], i64* [[TMP33]], align 8 +// CHECK9-NEXT: [[TMP34:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i64* +// CHECK9-NEXT: store i64 [[TMP30]], i64* [[TMP35]], align 8 +// CHECK9-NEXT: [[TMP36:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 0 +// CHECK9-NEXT: store i8* null, i8** [[TMP36]], align 8 +// CHECK9-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1 +// CHECK9-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i64* +// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP38]], align 8 +// CHECK9-NEXT: [[TMP39:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 1 +// CHECK9-NEXT: [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i64* +// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP40]], align 8 +// CHECK9-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 1 // CHECK9-NEXT: store i8* null, i8** [[TMP41]], align 8 -// CHECK9-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1 -// CHECK9-NEXT: [[TMP43:%.*]] = bitcast i8** [[TMP42]] to i64* -// CHECK9-NEXT: 
store i64 [[TMP1]], i64* [[TMP43]], align 8 -// CHECK9-NEXT: [[TMP44:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 1 -// CHECK9-NEXT: [[TMP45:%.*]] = bitcast i8** [[TMP44]] to i64* -// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP45]], align 8 -// CHECK9-NEXT: [[TMP46:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 1 -// CHECK9-NEXT: store i64 8, i64* [[TMP46]], align 8 -// CHECK9-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 1 -// CHECK9-NEXT: store i8* null, i8** [[TMP47]], align 8 -// CHECK9-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 2 -// CHECK9-NEXT: [[TMP49:%.*]] = bitcast i8** [[TMP48]] to i32** -// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP49]], align 8 -// CHECK9-NEXT: [[TMP50:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 2 -// CHECK9-NEXT: [[TMP51:%.*]] = bitcast i8** [[TMP50]] to i32** -// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP51]], align 8 -// CHECK9-NEXT: [[TMP52:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 2 -// CHECK9-NEXT: store i64 [[TMP35]], i64* [[TMP52]], align 8 -// CHECK9-NEXT: [[TMP53:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 2 -// CHECK9-NEXT: store i8* null, i8** [[TMP53]], align 8 -// CHECK9-NEXT: [[TMP54:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP55:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP56:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP57:%.*]] = load i32, i32* [[N]], align 4 -// CHECK9-NEXT: store i32 [[TMP57]], i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK9-NEXT: [[TMP58:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK9-NEXT: [[SUB12:%.*]] = sub nsw i32 [[TMP58]], 0 -// CHECK9-NEXT: [[DIV13:%.*]] = sdiv i32 [[SUB12]], 1 -// CHECK9-NEXT: [[SUB14:%.*]] = sub nsw i32 [[DIV13]], 1 -// CHECK9-NEXT: store i32 [[SUB14]], i32* [[DOTCAPTURE_EXPR_11]], align 4 -// CHECK9-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_11]], align 4 -// CHECK9-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP59]], 1 -// CHECK9-NEXT: [[TMP60:%.*]] = zext i32 [[ADD15]] to i64 -// CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP60]]) -// CHECK9-NEXT: [[TMP61:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l105.region_id, i32 3, i8** [[TMP54]], i8** [[TMP55]], i64* [[TMP56]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.2, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) -// CHECK9-NEXT: [[TMP62:%.*]] = icmp ne i32 [[TMP61]], 0 -// CHECK9-NEXT: br i1 [[TMP62]], label [[OMP_OFFLOAD_FAILED16:%.*]], label [[OMP_OFFLOAD_CONT17:%.*]] -// CHECK9: omp_offload.failed16: -// CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l105(i64 [[TMP34]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] -// CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT17]] -// CHECK9: omp_offload.cont17: -// CHECK9-NEXT: [[TMP63:%.*]] = load i32, i32* [[N]], align 4 -// CHECK9-NEXT: [[CONV19:%.*]] = bitcast i64* [[N_CASTED18]] to i32* -// CHECK9-NEXT: store i32 [[TMP63]], i32* [[CONV19]], align 4 -// CHECK9-NEXT: 
[[TMP64:%.*]] = load i64, i64* [[N_CASTED18]], align 8 -// CHECK9-NEXT: [[TMP65:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK9-NEXT: [[TMP66:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP67:%.*]] = bitcast i8** [[TMP66]] to i64* -// CHECK9-NEXT: store i64 [[TMP64]], i64* [[TMP67]], align 8 -// CHECK9-NEXT: [[TMP68:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP69:%.*]] = bitcast i8** [[TMP68]] to i64* -// CHECK9-NEXT: store i64 [[TMP64]], i64* [[TMP69]], align 8 -// CHECK9-NEXT: [[TMP70:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES23]], i32 0, i32 0 -// CHECK9-NEXT: store i64 4, i64* [[TMP70]], align 8 -// CHECK9-NEXT: [[TMP71:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 0 -// CHECK9-NEXT: store i8* null, i8** [[TMP71]], align 8 -// CHECK9-NEXT: [[TMP72:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 1 -// CHECK9-NEXT: [[TMP73:%.*]] = bitcast i8** [[TMP72]] to i64* -// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP73]], align 8 -// CHECK9-NEXT: [[TMP74:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 1 -// CHECK9-NEXT: [[TMP75:%.*]] = bitcast i8** [[TMP74]] to i64* -// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP75]], align 8 -// CHECK9-NEXT: [[TMP76:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES23]], i32 0, i32 1 -// CHECK9-NEXT: store i64 8, i64* [[TMP76]], align 8 -// CHECK9-NEXT: [[TMP77:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 1 -// CHECK9-NEXT: store i8* null, i8** [[TMP77]], align 8 -// CHECK9-NEXT: [[TMP78:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 2 -// CHECK9-NEXT: [[TMP79:%.*]] = bitcast i8** [[TMP78]] to i32** -// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP79]], align 8 -// CHECK9-NEXT: [[TMP80:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 2 -// CHECK9-NEXT: [[TMP81:%.*]] = bitcast i8** [[TMP80]] to i32** -// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP81]], align 8 -// CHECK9-NEXT: [[TMP82:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES23]], i32 0, i32 2 -// CHECK9-NEXT: store i64 [[TMP65]], i64* [[TMP82]], align 8 -// CHECK9-NEXT: [[TMP83:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 2 -// CHECK9-NEXT: store i8* null, i8** [[TMP83]], align 8 -// CHECK9-NEXT: [[TMP84:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP85:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP86:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES23]], i32 0, i32 0 -// CHECK9-NEXT: [[TMP87:%.*]] = load i32, i32* [[N]], align 4 -// CHECK9-NEXT: store i32 [[TMP87]], i32* [[DOTCAPTURE_EXPR_25]], align 4 -// CHECK9-NEXT: [[TMP88:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_25]], align 4 -// CHECK9-NEXT: [[SUB27:%.*]] = sub nsw i32 [[TMP88]], 0 -// CHECK9-NEXT: [[DIV28:%.*]] = sdiv i32 [[SUB27]], 1 -// CHECK9-NEXT: [[SUB29:%.*]] = sub nsw i32 [[DIV28]], 1 -// CHECK9-NEXT: store i32 [[SUB29]], i32* [[DOTCAPTURE_EXPR_26]], align 4 -// CHECK9-NEXT: [[TMP89:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_26]], align 4 -// CHECK9-NEXT: [[ADD30:%.*]] = add nsw i32 [[TMP89]], 1 -// 
CHECK9-NEXT: [[TMP90:%.*]] = zext i32 [[ADD30]] to i64 -// CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP90]]) -// CHECK9-NEXT: [[TMP91:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l110.region_id, i32 3, i8** [[TMP84]], i8** [[TMP85]], i64* [[TMP86]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) -// CHECK9-NEXT: [[TMP92:%.*]] = icmp ne i32 [[TMP91]], 0 -// CHECK9-NEXT: br i1 [[TMP92]], label [[OMP_OFFLOAD_FAILED31:%.*]], label [[OMP_OFFLOAD_CONT32:%.*]] -// CHECK9: omp_offload.failed31: -// CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l110(i64 [[TMP64]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] -// CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT32]] -// CHECK9: omp_offload.cont32: -// CHECK9-NEXT: [[TMP93:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 -// CHECK9-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP93]]) +// CHECK9-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 2 +// CHECK9-NEXT: [[TMP43:%.*]] = bitcast i8** [[TMP42]] to i32** +// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP43]], align 8 +// CHECK9-NEXT: [[TMP44:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 2 +// CHECK9-NEXT: [[TMP45:%.*]] = bitcast i8** [[TMP44]] to i32** +// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP45]], align 8 +// CHECK9-NEXT: store i64 [[TMP31]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.2, i32 0, i32 2), align 8 +// CHECK9-NEXT: [[TMP46:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 2 +// CHECK9-NEXT: store i8* null, i8** [[TMP46]], align 8 +// CHECK9-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP49:%.*]] = load i32, i32* [[N]], align 4 +// CHECK9-NEXT: store i32 [[TMP49]], i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK9-NEXT: [[TMP50:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK9-NEXT: [[SUB11:%.*]] = sub nsw i32 [[TMP50]], 0 +// CHECK9-NEXT: [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1 +// CHECK9-NEXT: [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1 +// CHECK9-NEXT: store i32 [[SUB13]], i32* [[DOTCAPTURE_EXPR_10]], align 4 +// CHECK9-NEXT: [[TMP51:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 +// CHECK9-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP51]], 1 +// CHECK9-NEXT: [[TMP52:%.*]] = zext i32 [[ADD14]] to i64 +// CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP52]]) +// CHECK9-NEXT: [[TMP53:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l105.region_id, i32 3, i8** [[TMP47]], i8** [[TMP48]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.2, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.3, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK9-NEXT: [[TMP54:%.*]] = icmp ne i32 [[TMP53]], 0 +// CHECK9-NEXT: br i1 [[TMP54]], label [[OMP_OFFLOAD_FAILED15:%.*]], label [[OMP_OFFLOAD_CONT16:%.*]] +// CHECK9: omp_offload.failed15: +// CHECK9-NEXT: call void 
@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l105(i64 [[TMP30]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] +// CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT16]] +// CHECK9: omp_offload.cont16: +// CHECK9-NEXT: [[TMP55:%.*]] = load i32, i32* [[N]], align 4 +// CHECK9-NEXT: [[CONV18:%.*]] = bitcast i64* [[N_CASTED17]] to i32* +// CHECK9-NEXT: store i32 [[TMP55]], i32* [[CONV18]], align 4 +// CHECK9-NEXT: [[TMP56:%.*]] = load i64, i64* [[N_CASTED17]], align 8 +// CHECK9-NEXT: [[TMP57:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK9-NEXT: [[TMP58:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP59:%.*]] = bitcast i8** [[TMP58]] to i64* +// CHECK9-NEXT: store i64 [[TMP56]], i64* [[TMP59]], align 8 +// CHECK9-NEXT: [[TMP60:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP61:%.*]] = bitcast i8** [[TMP60]] to i64* +// CHECK9-NEXT: store i64 [[TMP56]], i64* [[TMP61]], align 8 +// CHECK9-NEXT: [[TMP62:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i64 0, i64 0 +// CHECK9-NEXT: store i8* null, i8** [[TMP62]], align 8 +// CHECK9-NEXT: [[TMP63:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 1 +// CHECK9-NEXT: [[TMP64:%.*]] = bitcast i8** [[TMP63]] to i64* +// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP64]], align 8 +// CHECK9-NEXT: [[TMP65:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 1 +// CHECK9-NEXT: [[TMP66:%.*]] = bitcast i8** [[TMP65]] to i64* +// CHECK9-NEXT: store i64 [[TMP1]], i64* [[TMP66]], align 8 +// CHECK9-NEXT: [[TMP67:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i64 0, i64 1 +// CHECK9-NEXT: store i8* null, i8** [[TMP67]], align 8 +// CHECK9-NEXT: [[TMP68:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 2 +// CHECK9-NEXT: [[TMP69:%.*]] = bitcast i8** [[TMP68]] to i32** +// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP69]], align 8 +// CHECK9-NEXT: [[TMP70:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 2 +// CHECK9-NEXT: [[TMP71:%.*]] = bitcast i8** [[TMP70]] to i32** +// CHECK9-NEXT: store i32* [[VLA]], i32** [[TMP71]], align 8 +// CHECK9-NEXT: store i64 [[TMP57]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.5, i32 0, i32 2), align 8 +// CHECK9-NEXT: [[TMP72:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i64 0, i64 2 +// CHECK9-NEXT: store i8* null, i8** [[TMP72]], align 8 +// CHECK9-NEXT: [[TMP73:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP74:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 0 +// CHECK9-NEXT: [[TMP75:%.*]] = load i32, i32* [[N]], align 4 +// CHECK9-NEXT: store i32 [[TMP75]], i32* [[DOTCAPTURE_EXPR_23]], align 4 +// CHECK9-NEXT: [[TMP76:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_23]], align 4 +// CHECK9-NEXT: [[SUB25:%.*]] = sub nsw i32 [[TMP76]], 0 +// CHECK9-NEXT: [[DIV26:%.*]] = sdiv i32 [[SUB25]], 1 +// CHECK9-NEXT: [[SUB27:%.*]] = sub nsw i32 [[DIV26]], 1 +// CHECK9-NEXT: store i32 [[SUB27]], i32* [[DOTCAPTURE_EXPR_24]], align 4 +// CHECK9-NEXT: [[TMP77:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_24]], align 4 +// CHECK9-NEXT: [[ADD28:%.*]] = add nsw i32 [[TMP77]], 1 +// CHECK9-NEXT: [[TMP78:%.*]] = zext i32 [[ADD28]] to i64 +// CHECK9-NEXT: call void 
@__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP78]]) +// CHECK9-NEXT: [[TMP79:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l110.region_id, i32 3, i8** [[TMP73]], i8** [[TMP74]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.5, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.6, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK9-NEXT: [[TMP80:%.*]] = icmp ne i32 [[TMP79]], 0 +// CHECK9-NEXT: br i1 [[TMP80]], label [[OMP_OFFLOAD_FAILED29:%.*]], label [[OMP_OFFLOAD_CONT30:%.*]] +// CHECK9: omp_offload.failed29: +// CHECK9-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l110(i64 [[TMP56]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] +// CHECK9-NEXT: br label [[OMP_OFFLOAD_CONT30]] +// CHECK9: omp_offload.cont30: +// CHECK9-NEXT: [[TMP81:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 +// CHECK9-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP81]]) // CHECK9-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK9-NEXT: [[TMP94:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK9-NEXT: call void @llvm.stackrestore(i8* [[TMP94]]) -// CHECK9-NEXT: [[TMP95:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK9-NEXT: ret i32 [[TMP95]] +// CHECK9-NEXT: [[TMP82:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK9-NEXT: call void @llvm.stackrestore(i8* [[TMP82]]) +// CHECK9-NEXT: [[TMP83:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK9-NEXT: ret i32 [[TMP83]] // // // CHECK9-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l100 @@ -2577,11 +2556,11 @@ // CHECK9-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK9-NEXT: store i32 [[TMP3]], i32* [[CONV1]], align 4 // CHECK9-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i64, i32*, i64)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32* [[CONV]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP4]]) +// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i64, i32*, i64)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i32* [[CONV]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP4]]) // CHECK9-NEXT: ret void // // -// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..3 +// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..4 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK9-NEXT: entry: // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -2738,7 +2717,7 @@ // CHECK9-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK9-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK9-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l79.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.6, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK9-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l79.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) // CHECK9-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK9-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK9: omp_offload.failed: @@ -2756,7 +2735,7 @@ // CHECK9-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0 // CHECK9-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0 // CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK9-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l84.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK9-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l84.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.11, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) // CHECK9-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 // CHECK9-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED5:%.*]], label [[OMP_OFFLOAD_CONT6:%.*]] // 
CHECK9: omp_offload.failed5: @@ -2774,7 +2753,7 @@ // CHECK9-NEXT: [[TMP23:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0 // CHECK9-NEXT: [[TMP24:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0 // CHECK9-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK9-NEXT: [[TMP25:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l89.region_id, i32 1, i8** [[TMP23]], i8** [[TMP24]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.11, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK9-NEXT: [[TMP25:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l89.region_id, i32 1, i8** [[TMP23]], i8** [[TMP24]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.14, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.15, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) // CHECK9-NEXT: [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0 // CHECK9-NEXT: br i1 [[TMP26]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]] // CHECK9: omp_offload.failed11: @@ -2790,11 +2769,11 @@ // CHECK9-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK9-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 // CHECK9-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 -// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..5 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..7 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK9-NEXT: ret void // // -// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..5 +// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..7 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK9-NEXT: entry: // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -2874,11 +2853,11 @@ // CHECK9-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK9-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 // CHECK9-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 -// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..7 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK9-NEXT: ret void // // -// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..7 +// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..10 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK9-NEXT: entry: // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -2958,11 +2937,11 @@ // CHECK9-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK9-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 // CHECK9-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 -// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK9-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..13 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK9-NEXT: ret void // // -// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..10 +// CHECK9-LABEL: define {{[^@]+}}@.omp_outlined..13 // CHECK9-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK9-NEXT: entry: // CHECK9-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -3073,7 +3052,6 @@ // CHECK10-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK10-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 8 // CHECK10-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 8 -// CHECK10-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 8 // CHECK10-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK10-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK10-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -3081,18 +3059,16 @@ // CHECK10-NEXT: [[DOTOFFLOAD_BASEPTRS5:%.*]] = alloca [3 x i8*], align 8 // CHECK10-NEXT: [[DOTOFFLOAD_PTRS6:%.*]] = alloca [3 x i8*], align 8 // CHECK10-NEXT: [[DOTOFFLOAD_MAPPERS7:%.*]] = alloca [3 x i8*], align 8 -// CHECK10-NEXT: [[DOTOFFLOAD_SIZES8:%.*]] = alloca [3 x i64], align 8 -// CHECK10-NEXT: [[_TMP9:%.*]] = alloca i32, align 4 +// CHECK10-NEXT: [[_TMP8:%.*]] = alloca i32, align 4 +// CHECK10-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4 // CHECK10-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4 -// CHECK10-NEXT: [[DOTCAPTURE_EXPR_11:%.*]] = alloca i32, align 4 -// CHECK10-NEXT: [[N_CASTED18:%.*]] = alloca i64, align 8 -// CHECK10-NEXT: [[DOTOFFLOAD_BASEPTRS20:%.*]] = alloca [3 x i8*], align 8 -// CHECK10-NEXT: [[DOTOFFLOAD_PTRS21:%.*]] = alloca [3 x i8*], align 8 -// CHECK10-NEXT: [[DOTOFFLOAD_MAPPERS22:%.*]] = alloca [3 x i8*], align 8 -// CHECK10-NEXT: [[DOTOFFLOAD_SIZES23:%.*]] = alloca [3 x i64], align 8 -// CHECK10-NEXT: [[_TMP24:%.*]] = alloca i32, align 4 -// CHECK10-NEXT: [[DOTCAPTURE_EXPR_25:%.*]] = alloca i32, align 4 -// CHECK10-NEXT: [[DOTCAPTURE_EXPR_26:%.*]] = alloca i32, align 4 +// CHECK10-NEXT: [[N_CASTED17:%.*]] = alloca i64, align 8 +// CHECK10-NEXT: 
[[DOTOFFLOAD_BASEPTRS19:%.*]] = alloca [3 x i8*], align 8 +// CHECK10-NEXT: [[DOTOFFLOAD_PTRS20:%.*]] = alloca [3 x i8*], align 8 +// CHECK10-NEXT: [[DOTOFFLOAD_MAPPERS21:%.*]] = alloca [3 x i8*], align 8 +// CHECK10-NEXT: [[_TMP22:%.*]] = alloca i32, align 4 +// CHECK10-NEXT: [[DOTCAPTURE_EXPR_23:%.*]] = alloca i32, align 4 +// CHECK10-NEXT: [[DOTCAPTURE_EXPR_24:%.*]] = alloca i32, align 4 // CHECK10-NEXT: store i32 0, i32* [[RETVAL]], align 4 // CHECK10-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 // CHECK10-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 8 @@ -3114,170 +3090,152 @@ // CHECK10-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK10-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i64* // CHECK10-NEXT: store i64 [[TMP4]], i64* [[TMP9]], align 8 -// CHECK10-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK10-NEXT: store i64 4, i64* [[TMP10]], align 8 -// CHECK10-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK10-NEXT: store i8* null, i8** [[TMP11]], align 8 -// CHECK10-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK10-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i64* -// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP13]], align 8 -// CHECK10-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK10-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i64* -// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP15]], align 8 -// CHECK10-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK10-NEXT: store i64 8, i64* [[TMP16]], align 8 -// CHECK10-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK10-NEXT: store i8* null, i8** [[TMP17]], align 8 -// CHECK10-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK10-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK10-NEXT: store i8* null, i8** [[TMP10]], align 8 +// CHECK10-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK10-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i64* +// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP12]], align 8 +// CHECK10-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK10-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i64* +// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP14]], align 8 +// CHECK10-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK10-NEXT: store i8* null, i8** [[TMP15]], align 8 +// CHECK10-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK10-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** +// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 8 +// CHECK10-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK10-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** // CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 8 -// CHECK10-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, 
i32 2 -// CHECK10-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** -// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 8 -// CHECK10-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK10-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 8 -// CHECK10-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK10-NEXT: store i8* null, i8** [[TMP23]], align 8 -// CHECK10-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4 -// CHECK10-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK10-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK10-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 +// CHECK10-NEXT: store i64 [[TMP5]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 2), align 8 +// CHECK10-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK10-NEXT: store i8* null, i8** [[TMP20]], align 8 +// CHECK10-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP23:%.*]] = load i32, i32* [[N]], align 4 +// CHECK10-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK10-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK10-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK10-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK10-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK10-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK10-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK10-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1 -// CHECK10-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 -// CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[TMP30]]) -// CHECK10-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l100.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) -// CHECK10-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 -// CHECK10-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK10-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK10-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], 1 +// CHECK10-NEXT: [[TMP26:%.*]] = zext i32 [[ADD]] to i64 +// CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[TMP26]]) +// CHECK10-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l100.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr 
inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK10-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK10-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK10: omp_offload.failed: // CHECK10-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l100(i64 [[TMP4]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK10-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK10: omp_offload.cont: -// CHECK10-NEXT: [[TMP33:%.*]] = load i32, i32* [[N]], align 4 +// CHECK10-NEXT: [[TMP29:%.*]] = load i32, i32* [[N]], align 4 // CHECK10-NEXT: [[CONV4:%.*]] = bitcast i64* [[N_CASTED3]] to i32* -// CHECK10-NEXT: store i32 [[TMP33]], i32* [[CONV4]], align 4 -// CHECK10-NEXT: [[TMP34:%.*]] = load i64, i64* [[N_CASTED3]], align 8 -// CHECK10-NEXT: [[TMP35:%.*]] = mul nuw i64 [[TMP1]], 4 -// CHECK10-NEXT: [[TMP36:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to i64* -// CHECK10-NEXT: store i64 [[TMP34]], i64* [[TMP37]], align 8 -// CHECK10-NEXT: [[TMP38:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i64* -// CHECK10-NEXT: store i64 [[TMP34]], i64* [[TMP39]], align 8 -// CHECK10-NEXT: [[TMP40:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 0 -// CHECK10-NEXT: store i64 4, i64* [[TMP40]], align 8 -// CHECK10-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 0 +// CHECK10-NEXT: store i32 [[TMP29]], i32* [[CONV4]], align 4 +// CHECK10-NEXT: [[TMP30:%.*]] = load i64, i64* [[N_CASTED3]], align 8 +// CHECK10-NEXT: [[TMP31:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK10-NEXT: [[TMP32:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i64* +// CHECK10-NEXT: store i64 [[TMP30]], i64* [[TMP33]], align 8 +// CHECK10-NEXT: [[TMP34:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to i64* +// CHECK10-NEXT: store i64 [[TMP30]], i64* [[TMP35]], align 8 +// CHECK10-NEXT: [[TMP36:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 0 +// CHECK10-NEXT: store i8* null, i8** [[TMP36]], align 8 +// CHECK10-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1 +// CHECK10-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i64* +// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP38]], align 8 +// CHECK10-NEXT: [[TMP39:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 1 +// CHECK10-NEXT: [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i64* +// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP40]], align 8 +// CHECK10-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 1 // CHECK10-NEXT: store i8* null, i8** [[TMP41]], align 8 -// CHECK10-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 1 -// CHECK10-NEXT: [[TMP43:%.*]] = bitcast i8** [[TMP42]] to i64* -// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP43]], align 8 -// CHECK10-NEXT: [[TMP44:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 1 -// 
CHECK10-NEXT: [[TMP45:%.*]] = bitcast i8** [[TMP44]] to i64* -// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP45]], align 8 -// CHECK10-NEXT: [[TMP46:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 1 -// CHECK10-NEXT: store i64 8, i64* [[TMP46]], align 8 -// CHECK10-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 1 -// CHECK10-NEXT: store i8* null, i8** [[TMP47]], align 8 -// CHECK10-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 2 -// CHECK10-NEXT: [[TMP49:%.*]] = bitcast i8** [[TMP48]] to i32** -// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP49]], align 8 -// CHECK10-NEXT: [[TMP50:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 2 -// CHECK10-NEXT: [[TMP51:%.*]] = bitcast i8** [[TMP50]] to i32** -// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP51]], align 8 -// CHECK10-NEXT: [[TMP52:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 2 -// CHECK10-NEXT: store i64 [[TMP35]], i64* [[TMP52]], align 8 -// CHECK10-NEXT: [[TMP53:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 2 -// CHECK10-NEXT: store i8* null, i8** [[TMP53]], align 8 -// CHECK10-NEXT: [[TMP54:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP55:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP56:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES8]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP57:%.*]] = load i32, i32* [[N]], align 4 -// CHECK10-NEXT: store i32 [[TMP57]], i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK10-NEXT: [[TMP58:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK10-NEXT: [[SUB12:%.*]] = sub nsw i32 [[TMP58]], 0 -// CHECK10-NEXT: [[DIV13:%.*]] = sdiv i32 [[SUB12]], 1 -// CHECK10-NEXT: [[SUB14:%.*]] = sub nsw i32 [[DIV13]], 1 -// CHECK10-NEXT: store i32 [[SUB14]], i32* [[DOTCAPTURE_EXPR_11]], align 4 -// CHECK10-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_11]], align 4 -// CHECK10-NEXT: [[ADD15:%.*]] = add nsw i32 [[TMP59]], 1 -// CHECK10-NEXT: [[TMP60:%.*]] = zext i32 [[ADD15]] to i64 -// CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP60]]) -// CHECK10-NEXT: [[TMP61:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l105.region_id, i32 3, i8** [[TMP54]], i8** [[TMP55]], i64* [[TMP56]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.2, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) -// CHECK10-NEXT: [[TMP62:%.*]] = icmp ne i32 [[TMP61]], 0 -// CHECK10-NEXT: br i1 [[TMP62]], label [[OMP_OFFLOAD_FAILED16:%.*]], label [[OMP_OFFLOAD_CONT17:%.*]] -// CHECK10: omp_offload.failed16: -// CHECK10-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l105(i64 [[TMP34]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] -// CHECK10-NEXT: br label [[OMP_OFFLOAD_CONT17]] -// CHECK10: omp_offload.cont17: -// CHECK10-NEXT: [[TMP63:%.*]] = load i32, i32* [[N]], align 4 -// CHECK10-NEXT: [[CONV19:%.*]] = bitcast i64* [[N_CASTED18]] to i32* -// CHECK10-NEXT: store i32 [[TMP63]], i32* [[CONV19]], align 4 -// CHECK10-NEXT: [[TMP64:%.*]] = load i64, i64* [[N_CASTED18]], align 8 -// CHECK10-NEXT: [[TMP65:%.*]] = mul nuw i64 [[TMP1]], 4 -// 
CHECK10-NEXT: [[TMP66:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP67:%.*]] = bitcast i8** [[TMP66]] to i64* -// CHECK10-NEXT: store i64 [[TMP64]], i64* [[TMP67]], align 8 -// CHECK10-NEXT: [[TMP68:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP69:%.*]] = bitcast i8** [[TMP68]] to i64* -// CHECK10-NEXT: store i64 [[TMP64]], i64* [[TMP69]], align 8 -// CHECK10-NEXT: [[TMP70:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES23]], i32 0, i32 0 -// CHECK10-NEXT: store i64 4, i64* [[TMP70]], align 8 -// CHECK10-NEXT: [[TMP71:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 0 -// CHECK10-NEXT: store i8* null, i8** [[TMP71]], align 8 -// CHECK10-NEXT: [[TMP72:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 1 -// CHECK10-NEXT: [[TMP73:%.*]] = bitcast i8** [[TMP72]] to i64* -// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP73]], align 8 -// CHECK10-NEXT: [[TMP74:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 1 -// CHECK10-NEXT: [[TMP75:%.*]] = bitcast i8** [[TMP74]] to i64* -// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP75]], align 8 -// CHECK10-NEXT: [[TMP76:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES23]], i32 0, i32 1 -// CHECK10-NEXT: store i64 8, i64* [[TMP76]], align 8 -// CHECK10-NEXT: [[TMP77:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 1 -// CHECK10-NEXT: store i8* null, i8** [[TMP77]], align 8 -// CHECK10-NEXT: [[TMP78:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 2 -// CHECK10-NEXT: [[TMP79:%.*]] = bitcast i8** [[TMP78]] to i32** -// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP79]], align 8 -// CHECK10-NEXT: [[TMP80:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 2 -// CHECK10-NEXT: [[TMP81:%.*]] = bitcast i8** [[TMP80]] to i32** -// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP81]], align 8 -// CHECK10-NEXT: [[TMP82:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES23]], i32 0, i32 2 -// CHECK10-NEXT: store i64 [[TMP65]], i64* [[TMP82]], align 8 -// CHECK10-NEXT: [[TMP83:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS22]], i64 0, i64 2 -// CHECK10-NEXT: store i8* null, i8** [[TMP83]], align 8 -// CHECK10-NEXT: [[TMP84:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS20]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP85:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS21]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP86:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES23]], i32 0, i32 0 -// CHECK10-NEXT: [[TMP87:%.*]] = load i32, i32* [[N]], align 4 -// CHECK10-NEXT: store i32 [[TMP87]], i32* [[DOTCAPTURE_EXPR_25]], align 4 -// CHECK10-NEXT: [[TMP88:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_25]], align 4 -// CHECK10-NEXT: [[SUB27:%.*]] = sub nsw i32 [[TMP88]], 0 -// CHECK10-NEXT: [[DIV28:%.*]] = sdiv i32 [[SUB27]], 1 -// CHECK10-NEXT: [[SUB29:%.*]] = sub nsw i32 [[DIV28]], 1 -// CHECK10-NEXT: store i32 [[SUB29]], i32* [[DOTCAPTURE_EXPR_26]], align 4 -// CHECK10-NEXT: [[TMP89:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_26]], align 4 -// CHECK10-NEXT: [[ADD30:%.*]] = add nsw i32 [[TMP89]], 1 -// CHECK10-NEXT: [[TMP90:%.*]] = zext i32 [[ADD30]] to i64 -// CHECK10-NEXT: 
call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP90]]) -// CHECK10-NEXT: [[TMP91:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l110.region_id, i32 3, i8** [[TMP84]], i8** [[TMP85]], i64* [[TMP86]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) -// CHECK10-NEXT: [[TMP92:%.*]] = icmp ne i32 [[TMP91]], 0 -// CHECK10-NEXT: br i1 [[TMP92]], label [[OMP_OFFLOAD_FAILED31:%.*]], label [[OMP_OFFLOAD_CONT32:%.*]] -// CHECK10: omp_offload.failed31: -// CHECK10-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l110(i64 [[TMP64]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] -// CHECK10-NEXT: br label [[OMP_OFFLOAD_CONT32]] -// CHECK10: omp_offload.cont32: -// CHECK10-NEXT: [[TMP93:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 -// CHECK10-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP93]]) +// CHECK10-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 2 +// CHECK10-NEXT: [[TMP43:%.*]] = bitcast i8** [[TMP42]] to i32** +// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP43]], align 8 +// CHECK10-NEXT: [[TMP44:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 2 +// CHECK10-NEXT: [[TMP45:%.*]] = bitcast i8** [[TMP44]] to i32** +// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP45]], align 8 +// CHECK10-NEXT: store i64 [[TMP31]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.2, i32 0, i32 2), align 8 +// CHECK10-NEXT: [[TMP46:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS7]], i64 0, i64 2 +// CHECK10-NEXT: store i8* null, i8** [[TMP46]], align 8 +// CHECK10-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS5]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS6]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP49:%.*]] = load i32, i32* [[N]], align 4 +// CHECK10-NEXT: store i32 [[TMP49]], i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK10-NEXT: [[TMP50:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK10-NEXT: [[SUB11:%.*]] = sub nsw i32 [[TMP50]], 0 +// CHECK10-NEXT: [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1 +// CHECK10-NEXT: [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1 +// CHECK10-NEXT: store i32 [[SUB13]], i32* [[DOTCAPTURE_EXPR_10]], align 4 +// CHECK10-NEXT: [[TMP51:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 +// CHECK10-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP51]], 1 +// CHECK10-NEXT: [[TMP52:%.*]] = zext i32 [[ADD14]] to i64 +// CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP52]]) +// CHECK10-NEXT: [[TMP53:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l105.region_id, i32 3, i8** [[TMP47]], i8** [[TMP48]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.2, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.3, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK10-NEXT: [[TMP54:%.*]] = icmp ne i32 [[TMP53]], 0 +// CHECK10-NEXT: br i1 [[TMP54]], label [[OMP_OFFLOAD_FAILED15:%.*]], label [[OMP_OFFLOAD_CONT16:%.*]] +// CHECK10: omp_offload.failed15: +// CHECK10-NEXT: call void 
@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l105(i64 [[TMP30]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] +// CHECK10-NEXT: br label [[OMP_OFFLOAD_CONT16]] +// CHECK10: omp_offload.cont16: +// CHECK10-NEXT: [[TMP55:%.*]] = load i32, i32* [[N]], align 4 +// CHECK10-NEXT: [[CONV18:%.*]] = bitcast i64* [[N_CASTED17]] to i32* +// CHECK10-NEXT: store i32 [[TMP55]], i32* [[CONV18]], align 4 +// CHECK10-NEXT: [[TMP56:%.*]] = load i64, i64* [[N_CASTED17]], align 8 +// CHECK10-NEXT: [[TMP57:%.*]] = mul nuw i64 [[TMP1]], 4 +// CHECK10-NEXT: [[TMP58:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP59:%.*]] = bitcast i8** [[TMP58]] to i64* +// CHECK10-NEXT: store i64 [[TMP56]], i64* [[TMP59]], align 8 +// CHECK10-NEXT: [[TMP60:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP61:%.*]] = bitcast i8** [[TMP60]] to i64* +// CHECK10-NEXT: store i64 [[TMP56]], i64* [[TMP61]], align 8 +// CHECK10-NEXT: [[TMP62:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i64 0, i64 0 +// CHECK10-NEXT: store i8* null, i8** [[TMP62]], align 8 +// CHECK10-NEXT: [[TMP63:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 1 +// CHECK10-NEXT: [[TMP64:%.*]] = bitcast i8** [[TMP63]] to i64* +// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP64]], align 8 +// CHECK10-NEXT: [[TMP65:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 1 +// CHECK10-NEXT: [[TMP66:%.*]] = bitcast i8** [[TMP65]] to i64* +// CHECK10-NEXT: store i64 [[TMP1]], i64* [[TMP66]], align 8 +// CHECK10-NEXT: [[TMP67:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i64 0, i64 1 +// CHECK10-NEXT: store i8* null, i8** [[TMP67]], align 8 +// CHECK10-NEXT: [[TMP68:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 2 +// CHECK10-NEXT: [[TMP69:%.*]] = bitcast i8** [[TMP68]] to i32** +// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP69]], align 8 +// CHECK10-NEXT: [[TMP70:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 2 +// CHECK10-NEXT: [[TMP71:%.*]] = bitcast i8** [[TMP70]] to i32** +// CHECK10-NEXT: store i32* [[VLA]], i32** [[TMP71]], align 8 +// CHECK10-NEXT: store i64 [[TMP57]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.5, i32 0, i32 2), align 8 +// CHECK10-NEXT: [[TMP72:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS21]], i64 0, i64 2 +// CHECK10-NEXT: store i8* null, i8** [[TMP72]], align 8 +// CHECK10-NEXT: [[TMP73:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS19]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP74:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS20]], i32 0, i32 0 +// CHECK10-NEXT: [[TMP75:%.*]] = load i32, i32* [[N]], align 4 +// CHECK10-NEXT: store i32 [[TMP75]], i32* [[DOTCAPTURE_EXPR_23]], align 4 +// CHECK10-NEXT: [[TMP76:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_23]], align 4 +// CHECK10-NEXT: [[SUB25:%.*]] = sub nsw i32 [[TMP76]], 0 +// CHECK10-NEXT: [[DIV26:%.*]] = sdiv i32 [[SUB25]], 1 +// CHECK10-NEXT: [[SUB27:%.*]] = sub nsw i32 [[DIV26]], 1 +// CHECK10-NEXT: store i32 [[SUB27]], i32* [[DOTCAPTURE_EXPR_24]], align 4 +// CHECK10-NEXT: [[TMP77:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_24]], align 4 +// CHECK10-NEXT: [[ADD28:%.*]] = add nsw i32 [[TMP77]], 1 +// CHECK10-NEXT: [[TMP78:%.*]] = zext i32 [[ADD28]] to i64 
+// CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP78]]) +// CHECK10-NEXT: [[TMP79:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l110.region_id, i32 3, i8** [[TMP73]], i8** [[TMP74]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.5, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.6, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK10-NEXT: [[TMP80:%.*]] = icmp ne i32 [[TMP79]], 0 +// CHECK10-NEXT: br i1 [[TMP80]], label [[OMP_OFFLOAD_FAILED29:%.*]], label [[OMP_OFFLOAD_CONT30:%.*]] +// CHECK10: omp_offload.failed29: +// CHECK10-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l110(i64 [[TMP56]], i64 [[TMP1]], i32* [[VLA]]) #[[ATTR3]] +// CHECK10-NEXT: br label [[OMP_OFFLOAD_CONT30]] +// CHECK10: omp_offload.cont30: +// CHECK10-NEXT: [[TMP81:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 +// CHECK10-NEXT: [[CALL:%.*]] = call noundef signext i32 @_Z5tmainIiLi10EEiT_(i32 noundef signext [[TMP81]]) // CHECK10-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK10-NEXT: [[TMP94:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK10-NEXT: call void @llvm.stackrestore(i8* [[TMP94]]) -// CHECK10-NEXT: [[TMP95:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK10-NEXT: ret i32 [[TMP95]] +// CHECK10-NEXT: [[TMP82:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK10-NEXT: call void @llvm.stackrestore(i8* [[TMP82]]) +// CHECK10-NEXT: [[TMP83:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK10-NEXT: ret i32 [[TMP83]] // // // CHECK10-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l100 @@ -3546,11 +3504,11 @@ // CHECK10-NEXT: [[CONV1:%.*]] = bitcast i64* [[DOTCAPTURE_EXPR__CASTED]] to i32* // CHECK10-NEXT: store i32 [[TMP3]], i32* [[CONV1]], align 4 // CHECK10-NEXT: [[TMP4:%.*]] = load i64, i64* [[DOTCAPTURE_EXPR__CASTED]], align 8 -// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i64, i32*, i64)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32* [[CONV]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP4]]) +// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i64, i32*, i64)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i32* [[CONV]], i64 [[TMP0]], i32* [[TMP1]], i64 [[TMP4]]) // CHECK10-NEXT: ret void // // -// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..3 +// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..4 // CHECK10-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i64 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i64 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK10-NEXT: entry: // CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -3707,7 +3665,7 @@ // CHECK10-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK10-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK10-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l79.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.6, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK10-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l79.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) // CHECK10-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK10-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK10: omp_offload.failed: @@ -3725,7 +3683,7 @@ // CHECK10-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0 // CHECK10-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0 // CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK10-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l84.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK10-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l84.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.11, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) // CHECK10-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 // CHECK10-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED5:%.*]], label 
[[OMP_OFFLOAD_CONT6:%.*]] // CHECK10: omp_offload.failed5: @@ -3743,7 +3701,7 @@ // CHECK10-NEXT: [[TMP23:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0 // CHECK10-NEXT: [[TMP24:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0 // CHECK10-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK10-NEXT: [[TMP25:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l89.region_id, i32 1, i8** [[TMP23]], i8** [[TMP24]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.11, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK10-NEXT: [[TMP25:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l89.region_id, i32 1, i8** [[TMP23]], i8** [[TMP24]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.14, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.15, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) // CHECK10-NEXT: [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0 // CHECK10-NEXT: br i1 [[TMP26]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]] // CHECK10: omp_offload.failed11: @@ -3759,11 +3717,11 @@ // CHECK10-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK10-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 // CHECK10-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 -// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..5 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..7 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK10-NEXT: ret void // // -// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..5 +// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..7 // CHECK10-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK10-NEXT: entry: // CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -3843,11 +3801,11 @@ // CHECK10-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK10-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 // CHECK10-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 -// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..7 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK10-NEXT: ret void // // -// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..7 +// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..10 // CHECK10-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK10-NEXT: entry: // CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -3927,11 +3885,11 @@ // CHECK10-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 8 // CHECK10-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 8 // CHECK10-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 8 -// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK10-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..13 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK10-NEXT: ret void // // -// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..10 +// CHECK10-LABEL: define {{[^@]+}}@.omp_outlined..13 // CHECK10-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK10-NEXT: entry: // CHECK10-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 @@ -4042,7 +4000,6 @@ // CHECK11-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK11-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK11-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4 -// CHECK11-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 4 // CHECK11-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -4050,18 +4007,16 @@ // CHECK11-NEXT: [[DOTOFFLOAD_BASEPTRS4:%.*]] = alloca [3 x i8*], align 4 // CHECK11-NEXT: [[DOTOFFLOAD_PTRS5:%.*]] = alloca [3 x i8*], align 4 // CHECK11-NEXT: [[DOTOFFLOAD_MAPPERS6:%.*]] = alloca [3 x i8*], align 4 -// CHECK11-NEXT: [[DOTOFFLOAD_SIZES7:%.*]] = alloca [3 x i64], align 4 -// CHECK11-NEXT: [[_TMP8:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[_TMP7:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTCAPTURE_EXPR_8:%.*]] = alloca i32, align 4 // CHECK11-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[N_CASTED17:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTOFFLOAD_BASEPTRS18:%.*]] = alloca [3 x i8*], align 4 -// CHECK11-NEXT: [[DOTOFFLOAD_PTRS19:%.*]] = alloca [3 x i8*], align 4 -// CHECK11-NEXT: [[DOTOFFLOAD_MAPPERS20:%.*]] = alloca [3 x i8*], align 4 -// CHECK11-NEXT: [[DOTOFFLOAD_SIZES21:%.*]] = alloca [3 x i64], align 4 -// CHECK11-NEXT: [[_TMP22:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTCAPTURE_EXPR_23:%.*]] = alloca i32, align 4 -// CHECK11-NEXT: [[DOTCAPTURE_EXPR_24:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[N_CASTED16:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: 
[[DOTOFFLOAD_BASEPTRS17:%.*]] = alloca [3 x i8*], align 4 +// CHECK11-NEXT: [[DOTOFFLOAD_PTRS18:%.*]] = alloca [3 x i8*], align 4 +// CHECK11-NEXT: [[DOTOFFLOAD_MAPPERS19:%.*]] = alloca [3 x i8*], align 4 +// CHECK11-NEXT: [[_TMP20:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTCAPTURE_EXPR_21:%.*]] = alloca i32, align 4 +// CHECK11-NEXT: [[DOTCAPTURE_EXPR_22:%.*]] = alloca i32, align 4 // CHECK11-NEXT: store i32 0, i32* [[RETVAL]], align 4 // CHECK11-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 // CHECK11-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 4 @@ -4082,170 +4037,152 @@ // CHECK11-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK11-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i32* // CHECK11-NEXT: store i32 [[TMP3]], i32* [[TMP9]], align 4 -// CHECK11-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK11-NEXT: store i64 4, i64* [[TMP10]], align 4 -// CHECK11-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK11-NEXT: store i8* null, i8** [[TMP11]], align 4 -// CHECK11-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK11-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32* -// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP13]], align 4 -// CHECK11-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK11-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32* -// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP15]], align 4 -// CHECK11-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK11-NEXT: store i64 4, i64* [[TMP16]], align 4 -// CHECK11-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK11-NEXT: store i8* null, i8** [[TMP17]], align 4 -// CHECK11-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK11-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK11-NEXT: store i8* null, i8** [[TMP10]], align 4 +// CHECK11-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK11-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i32* +// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP12]], align 4 +// CHECK11-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK11-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i32* +// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP14]], align 4 +// CHECK11-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK11-NEXT: store i8* null, i8** [[TMP15]], align 4 +// CHECK11-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK11-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** +// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 4 +// CHECK11-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK11-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** // CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 4 -// CHECK11-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, 
i32 2 -// CHECK11-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** -// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 4 -// CHECK11-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK11-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 4 -// CHECK11-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK11-NEXT: store i8* null, i8** [[TMP23]], align 4 -// CHECK11-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4 -// CHECK11-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK11-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 +// CHECK11-NEXT: store i64 [[TMP5]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 2), align 4 +// CHECK11-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK11-NEXT: store i8* null, i8** [[TMP20]], align 4 +// CHECK11-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP23:%.*]] = load i32, i32* [[N]], align 4 +// CHECK11-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK11-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK11-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK11-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK11-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK11-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK11-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1 -// CHECK11-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 -// CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[TMP30]]) -// CHECK11-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l100.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) -// CHECK11-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 -// CHECK11-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK11-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK11-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], 1 +// CHECK11-NEXT: [[TMP26:%.*]] = zext i32 [[ADD]] to i64 +// CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[TMP26]]) +// CHECK11-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l100.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr 
inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK11-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK11-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK11: omp_offload.failed: // CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l100(i32 [[TMP3]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK11: omp_offload.cont: -// CHECK11-NEXT: [[TMP33:%.*]] = load i32, i32* [[N]], align 4 -// CHECK11-NEXT: store i32 [[TMP33]], i32* [[N_CASTED3]], align 4 -// CHECK11-NEXT: [[TMP34:%.*]] = load i32, i32* [[N_CASTED3]], align 4 -// CHECK11-NEXT: [[TMP35:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK11-NEXT: [[TMP36:%.*]] = sext i32 [[TMP35]] to i64 -// CHECK11-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i32* -// CHECK11-NEXT: store i32 [[TMP34]], i32* [[TMP38]], align 4 -// CHECK11-NEXT: [[TMP39:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i32* -// CHECK11-NEXT: store i32 [[TMP34]], i32* [[TMP40]], align 4 -// CHECK11-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 0 -// CHECK11-NEXT: store i64 4, i64* [[TMP41]], align 4 -// CHECK11-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP29:%.*]] = load i32, i32* [[N]], align 4 +// CHECK11-NEXT: store i32 [[TMP29]], i32* [[N_CASTED3]], align 4 +// CHECK11-NEXT: [[TMP30:%.*]] = load i32, i32* [[N_CASTED3]], align 4 +// CHECK11-NEXT: [[TMP31:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK11-NEXT: [[TMP32:%.*]] = sext i32 [[TMP31]] to i64 +// CHECK11-NEXT: [[TMP33:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i32* +// CHECK11-NEXT: store i32 [[TMP30]], i32* [[TMP34]], align 4 +// CHECK11-NEXT: [[TMP35:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP36:%.*]] = bitcast i8** [[TMP35]] to i32* +// CHECK11-NEXT: store i32 [[TMP30]], i32* [[TMP36]], align 4 +// CHECK11-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0 +// CHECK11-NEXT: store i8* null, i8** [[TMP37]], align 4 +// CHECK11-NEXT: [[TMP38:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1 +// CHECK11-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i32* +// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP39]], align 4 +// CHECK11-NEXT: [[TMP40:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 1 +// CHECK11-NEXT: [[TMP41:%.*]] = bitcast i8** [[TMP40]] to i32* +// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP41]], align 4 +// CHECK11-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1 // CHECK11-NEXT: store i8* null, i8** [[TMP42]], align 4 -// CHECK11-NEXT: [[TMP43:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1 -// CHECK11-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32* -// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP44]], align 4 -// CHECK11-NEXT: [[TMP45:%.*]] = getelementptr inbounds [3 
x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 1 -// CHECK11-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32* -// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP46]], align 4 -// CHECK11-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 1 -// CHECK11-NEXT: store i64 4, i64* [[TMP47]], align 4 -// CHECK11-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1 -// CHECK11-NEXT: store i8* null, i8** [[TMP48]], align 4 -// CHECK11-NEXT: [[TMP49:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2 -// CHECK11-NEXT: [[TMP50:%.*]] = bitcast i8** [[TMP49]] to i32** -// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP50]], align 4 -// CHECK11-NEXT: [[TMP51:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 2 -// CHECK11-NEXT: [[TMP52:%.*]] = bitcast i8** [[TMP51]] to i32** -// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP52]], align 4 -// CHECK11-NEXT: [[TMP53:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 2 -// CHECK11-NEXT: store i64 [[TMP36]], i64* [[TMP53]], align 4 -// CHECK11-NEXT: [[TMP54:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 2 -// CHECK11-NEXT: store i8* null, i8** [[TMP54]], align 4 -// CHECK11-NEXT: [[TMP55:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP56:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP57:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP58:%.*]] = load i32, i32* [[N]], align 4 -// CHECK11-NEXT: store i32 [[TMP58]], i32* [[DOTCAPTURE_EXPR_9]], align 4 -// CHECK11-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 -// CHECK11-NEXT: [[SUB11:%.*]] = sub nsw i32 [[TMP59]], 0 -// CHECK11-NEXT: [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1 -// CHECK11-NEXT: [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1 -// CHECK11-NEXT: store i32 [[SUB13]], i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK11-NEXT: [[TMP60:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK11-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP60]], 1 -// CHECK11-NEXT: [[TMP61:%.*]] = zext i32 [[ADD14]] to i64 -// CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP61]]) -// CHECK11-NEXT: [[TMP62:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l105.region_id, i32 3, i8** [[TMP55]], i8** [[TMP56]], i64* [[TMP57]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.2, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) -// CHECK11-NEXT: [[TMP63:%.*]] = icmp ne i32 [[TMP62]], 0 -// CHECK11-NEXT: br i1 [[TMP63]], label [[OMP_OFFLOAD_FAILED15:%.*]], label [[OMP_OFFLOAD_CONT16:%.*]] -// CHECK11: omp_offload.failed15: -// CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l105(i32 [[TMP34]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] -// CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT16]] -// CHECK11: omp_offload.cont16: -// CHECK11-NEXT: [[TMP64:%.*]] = load i32, i32* [[N]], align 4 -// CHECK11-NEXT: store i32 [[TMP64]], i32* [[N_CASTED17]], align 4 -// CHECK11-NEXT: [[TMP65:%.*]] = load i32, i32* [[N_CASTED17]], align 4 -// CHECK11-NEXT: [[TMP66:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK11-NEXT: 
[[TMP67:%.*]] = sext i32 [[TMP66]] to i64 -// CHECK11-NEXT: [[TMP68:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP69:%.*]] = bitcast i8** [[TMP68]] to i32* -// CHECK11-NEXT: store i32 [[TMP65]], i32* [[TMP69]], align 4 -// CHECK11-NEXT: [[TMP70:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP71:%.*]] = bitcast i8** [[TMP70]] to i32* -// CHECK11-NEXT: store i32 [[TMP65]], i32* [[TMP71]], align 4 -// CHECK11-NEXT: [[TMP72:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES21]], i32 0, i32 0 -// CHECK11-NEXT: store i64 4, i64* [[TMP72]], align 4 -// CHECK11-NEXT: [[TMP73:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 0 -// CHECK11-NEXT: store i8* null, i8** [[TMP73]], align 4 -// CHECK11-NEXT: [[TMP74:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 1 -// CHECK11-NEXT: [[TMP75:%.*]] = bitcast i8** [[TMP74]] to i32* -// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP75]], align 4 -// CHECK11-NEXT: [[TMP76:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 1 -// CHECK11-NEXT: [[TMP77:%.*]] = bitcast i8** [[TMP76]] to i32* -// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP77]], align 4 -// CHECK11-NEXT: [[TMP78:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES21]], i32 0, i32 1 -// CHECK11-NEXT: store i64 4, i64* [[TMP78]], align 4 -// CHECK11-NEXT: [[TMP79:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 1 -// CHECK11-NEXT: store i8* null, i8** [[TMP79]], align 4 -// CHECK11-NEXT: [[TMP80:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 2 -// CHECK11-NEXT: [[TMP81:%.*]] = bitcast i8** [[TMP80]] to i32** -// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP81]], align 4 -// CHECK11-NEXT: [[TMP82:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 2 -// CHECK11-NEXT: [[TMP83:%.*]] = bitcast i8** [[TMP82]] to i32** -// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP83]], align 4 -// CHECK11-NEXT: [[TMP84:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES21]], i32 0, i32 2 -// CHECK11-NEXT: store i64 [[TMP67]], i64* [[TMP84]], align 4 -// CHECK11-NEXT: [[TMP85:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 2 -// CHECK11-NEXT: store i8* null, i8** [[TMP85]], align 4 -// CHECK11-NEXT: [[TMP86:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP87:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP88:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES21]], i32 0, i32 0 -// CHECK11-NEXT: [[TMP89:%.*]] = load i32, i32* [[N]], align 4 -// CHECK11-NEXT: store i32 [[TMP89]], i32* [[DOTCAPTURE_EXPR_23]], align 4 -// CHECK11-NEXT: [[TMP90:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_23]], align 4 -// CHECK11-NEXT: [[SUB25:%.*]] = sub nsw i32 [[TMP90]], 0 -// CHECK11-NEXT: [[DIV26:%.*]] = sdiv i32 [[SUB25]], 1 -// CHECK11-NEXT: [[SUB27:%.*]] = sub nsw i32 [[DIV26]], 1 -// CHECK11-NEXT: store i32 [[SUB27]], i32* [[DOTCAPTURE_EXPR_24]], align 4 -// CHECK11-NEXT: [[TMP91:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_24]], align 4 -// CHECK11-NEXT: [[ADD28:%.*]] = add nsw i32 [[TMP91]], 1 -// CHECK11-NEXT: [[TMP92:%.*]] 
= zext i32 [[ADD28]] to i64 -// CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP92]]) -// CHECK11-NEXT: [[TMP93:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l110.region_id, i32 3, i8** [[TMP86]], i8** [[TMP87]], i64* [[TMP88]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) -// CHECK11-NEXT: [[TMP94:%.*]] = icmp ne i32 [[TMP93]], 0 -// CHECK11-NEXT: br i1 [[TMP94]], label [[OMP_OFFLOAD_FAILED29:%.*]], label [[OMP_OFFLOAD_CONT30:%.*]] -// CHECK11: omp_offload.failed29: -// CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l110(i32 [[TMP65]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] -// CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT30]] -// CHECK11: omp_offload.cont30: -// CHECK11-NEXT: [[TMP95:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 -// CHECK11-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP95]]) +// CHECK11-NEXT: [[TMP43:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2 +// CHECK11-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32** +// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP44]], align 4 +// CHECK11-NEXT: [[TMP45:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 2 +// CHECK11-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32** +// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP46]], align 4 +// CHECK11-NEXT: store i64 [[TMP32]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.2, i32 0, i32 2), align 4 +// CHECK11-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 2 +// CHECK11-NEXT: store i8* null, i8** [[TMP47]], align 4 +// CHECK11-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP49:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP50:%.*]] = load i32, i32* [[N]], align 4 +// CHECK11-NEXT: store i32 [[TMP50]], i32* [[DOTCAPTURE_EXPR_8]], align 4 +// CHECK11-NEXT: [[TMP51:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_8]], align 4 +// CHECK11-NEXT: [[SUB10:%.*]] = sub nsw i32 [[TMP51]], 0 +// CHECK11-NEXT: [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1 +// CHECK11-NEXT: [[SUB12:%.*]] = sub nsw i32 [[DIV11]], 1 +// CHECK11-NEXT: store i32 [[SUB12]], i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK11-NEXT: [[TMP52:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK11-NEXT: [[ADD13:%.*]] = add nsw i32 [[TMP52]], 1 +// CHECK11-NEXT: [[TMP53:%.*]] = zext i32 [[ADD13]] to i64 +// CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP53]]) +// CHECK11-NEXT: [[TMP54:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l105.region_id, i32 3, i8** [[TMP48]], i8** [[TMP49]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.2, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.3, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK11-NEXT: [[TMP55:%.*]] = icmp ne i32 [[TMP54]], 0 +// CHECK11-NEXT: br i1 [[TMP55]], label [[OMP_OFFLOAD_FAILED14:%.*]], label [[OMP_OFFLOAD_CONT15:%.*]] +// CHECK11: omp_offload.failed14: +// CHECK11-NEXT: call void 
@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l105(i32 [[TMP30]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] +// CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT15]] +// CHECK11: omp_offload.cont15: +// CHECK11-NEXT: [[TMP56:%.*]] = load i32, i32* [[N]], align 4 +// CHECK11-NEXT: store i32 [[TMP56]], i32* [[N_CASTED16]], align 4 +// CHECK11-NEXT: [[TMP57:%.*]] = load i32, i32* [[N_CASTED16]], align 4 +// CHECK11-NEXT: [[TMP58:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK11-NEXT: [[TMP59:%.*]] = sext i32 [[TMP58]] to i64 +// CHECK11-NEXT: [[TMP60:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP61:%.*]] = bitcast i8** [[TMP60]] to i32* +// CHECK11-NEXT: store i32 [[TMP57]], i32* [[TMP61]], align 4 +// CHECK11-NEXT: [[TMP62:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP63:%.*]] = bitcast i8** [[TMP62]] to i32* +// CHECK11-NEXT: store i32 [[TMP57]], i32* [[TMP63]], align 4 +// CHECK11-NEXT: [[TMP64:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 0 +// CHECK11-NEXT: store i8* null, i8** [[TMP64]], align 4 +// CHECK11-NEXT: [[TMP65:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 1 +// CHECK11-NEXT: [[TMP66:%.*]] = bitcast i8** [[TMP65]] to i32* +// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP66]], align 4 +// CHECK11-NEXT: [[TMP67:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 1 +// CHECK11-NEXT: [[TMP68:%.*]] = bitcast i8** [[TMP67]] to i32* +// CHECK11-NEXT: store i32 [[TMP0]], i32* [[TMP68]], align 4 +// CHECK11-NEXT: [[TMP69:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 1 +// CHECK11-NEXT: store i8* null, i8** [[TMP69]], align 4 +// CHECK11-NEXT: [[TMP70:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 2 +// CHECK11-NEXT: [[TMP71:%.*]] = bitcast i8** [[TMP70]] to i32** +// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP71]], align 4 +// CHECK11-NEXT: [[TMP72:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 2 +// CHECK11-NEXT: [[TMP73:%.*]] = bitcast i8** [[TMP72]] to i32** +// CHECK11-NEXT: store i32* [[VLA]], i32** [[TMP73]], align 4 +// CHECK11-NEXT: store i64 [[TMP59]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.5, i32 0, i32 2), align 4 +// CHECK11-NEXT: [[TMP74:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 2 +// CHECK11-NEXT: store i8* null, i8** [[TMP74]], align 4 +// CHECK11-NEXT: [[TMP75:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP76:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 0 +// CHECK11-NEXT: [[TMP77:%.*]] = load i32, i32* [[N]], align 4 +// CHECK11-NEXT: store i32 [[TMP77]], i32* [[DOTCAPTURE_EXPR_21]], align 4 +// CHECK11-NEXT: [[TMP78:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_21]], align 4 +// CHECK11-NEXT: [[SUB23:%.*]] = sub nsw i32 [[TMP78]], 0 +// CHECK11-NEXT: [[DIV24:%.*]] = sdiv i32 [[SUB23]], 1 +// CHECK11-NEXT: [[SUB25:%.*]] = sub nsw i32 [[DIV24]], 1 +// CHECK11-NEXT: store i32 [[SUB25]], i32* [[DOTCAPTURE_EXPR_22]], align 4 +// CHECK11-NEXT: [[TMP79:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_22]], align 4 +// CHECK11-NEXT: [[ADD26:%.*]] = add nsw i32 [[TMP79]], 1 +// CHECK11-NEXT: [[TMP80:%.*]] = zext i32 [[ADD26]] to i64 +// 
CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP80]]) +// CHECK11-NEXT: [[TMP81:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l110.region_id, i32 3, i8** [[TMP75]], i8** [[TMP76]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.5, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.6, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK11-NEXT: [[TMP82:%.*]] = icmp ne i32 [[TMP81]], 0 +// CHECK11-NEXT: br i1 [[TMP82]], label [[OMP_OFFLOAD_FAILED27:%.*]], label [[OMP_OFFLOAD_CONT28:%.*]] +// CHECK11: omp_offload.failed27: +// CHECK11-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l110(i32 [[TMP57]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] +// CHECK11-NEXT: br label [[OMP_OFFLOAD_CONT28]] +// CHECK11: omp_offload.cont28: +// CHECK11-NEXT: [[TMP83:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 +// CHECK11-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP83]]) // CHECK11-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK11-NEXT: [[TMP96:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK11-NEXT: call void @llvm.stackrestore(i8* [[TMP96]]) -// CHECK11-NEXT: [[TMP97:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK11-NEXT: ret i32 [[TMP97]] +// CHECK11-NEXT: [[TMP84:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK11-NEXT: call void @llvm.stackrestore(i8* [[TMP84]]) +// CHECK11-NEXT: [[TMP85:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK11-NEXT: ret i32 [[TMP85]] // // // CHECK11-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l100 @@ -4508,11 +4445,11 @@ // CHECK11-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 // CHECK11-NEXT: store i32 [[TMP3]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK11-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32, i32*, i32)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP4]]) +// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32, i32*, i32)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP4]]) // CHECK11-NEXT: ret void // // -// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..3 +// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..4 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK11-NEXT: entry: // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -4667,7 +4604,7 @@ // CHECK11-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK11-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK11-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l79.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.6, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK11-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l79.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) // CHECK11-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK11-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK11: omp_offload.failed: @@ -4685,7 +4622,7 @@ // CHECK11-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0 // CHECK11-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0 // CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK11-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l84.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK11-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l84.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.11, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) // CHECK11-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 // CHECK11-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED5:%.*]], label 
[[OMP_OFFLOAD_CONT6:%.*]] // CHECK11: omp_offload.failed5: @@ -4703,7 +4640,7 @@ // CHECK11-NEXT: [[TMP23:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0 // CHECK11-NEXT: [[TMP24:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0 // CHECK11-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK11-NEXT: [[TMP25:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l89.region_id, i32 1, i8** [[TMP23]], i8** [[TMP24]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.11, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK11-NEXT: [[TMP25:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l89.region_id, i32 1, i8** [[TMP23]], i8** [[TMP24]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.14, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.15, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) // CHECK11-NEXT: [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0 // CHECK11-NEXT: br i1 [[TMP26]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]] // CHECK11: omp_offload.failed11: @@ -4719,11 +4656,11 @@ // CHECK11-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK11-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 // CHECK11-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 -// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..5 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..7 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK11-NEXT: ret void // // -// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..5 +// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..7 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK11-NEXT: entry: // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -4802,11 +4739,11 @@ // CHECK11-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK11-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 // CHECK11-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 -// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..7 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK11-NEXT: ret void // // -// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..7 +// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..10 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK11-NEXT: entry: // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -4885,11 +4822,11 @@ // CHECK11-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK11-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 // CHECK11-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 -// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK11-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..13 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK11-NEXT: ret void // // -// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..10 +// CHECK11-LABEL: define {{[^@]+}}@.omp_outlined..13 // CHECK11-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK11-NEXT: entry: // CHECK11-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -4999,7 +4936,6 @@ // CHECK12-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK12-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [3 x i8*], align 4 // CHECK12-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [3 x i8*], align 4 -// CHECK12-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [3 x i64], align 4 // CHECK12-NEXT: [[TMP:%.*]] = alloca i32, align 4 // CHECK12-NEXT: [[DOTCAPTURE_EXPR_:%.*]] = alloca i32, align 4 // CHECK12-NEXT: [[DOTCAPTURE_EXPR_1:%.*]] = alloca i32, align 4 @@ -5007,18 +4943,16 @@ // CHECK12-NEXT: [[DOTOFFLOAD_BASEPTRS4:%.*]] = alloca [3 x i8*], align 4 // CHECK12-NEXT: [[DOTOFFLOAD_PTRS5:%.*]] = alloca [3 x i8*], align 4 // CHECK12-NEXT: [[DOTOFFLOAD_MAPPERS6:%.*]] = alloca [3 x i8*], align 4 -// CHECK12-NEXT: [[DOTOFFLOAD_SIZES7:%.*]] = alloca [3 x i64], align 4 -// CHECK12-NEXT: [[_TMP8:%.*]] = alloca i32, align 4 +// CHECK12-NEXT: [[_TMP7:%.*]] = alloca i32, align 4 +// CHECK12-NEXT: [[DOTCAPTURE_EXPR_8:%.*]] = alloca i32, align 4 // CHECK12-NEXT: [[DOTCAPTURE_EXPR_9:%.*]] = alloca i32, align 4 -// CHECK12-NEXT: [[DOTCAPTURE_EXPR_10:%.*]] = alloca i32, align 4 -// CHECK12-NEXT: [[N_CASTED17:%.*]] = alloca i32, align 4 -// CHECK12-NEXT: [[DOTOFFLOAD_BASEPTRS18:%.*]] = alloca [3 x i8*], align 4 -// CHECK12-NEXT: [[DOTOFFLOAD_PTRS19:%.*]] = alloca [3 x i8*], align 4 -// CHECK12-NEXT: [[DOTOFFLOAD_MAPPERS20:%.*]] = alloca [3 x i8*], align 4 -// CHECK12-NEXT: [[DOTOFFLOAD_SIZES21:%.*]] = alloca [3 x i64], align 4 -// CHECK12-NEXT: [[_TMP22:%.*]] = alloca i32, align 4 -// CHECK12-NEXT: [[DOTCAPTURE_EXPR_23:%.*]] = alloca i32, align 4 -// CHECK12-NEXT: [[DOTCAPTURE_EXPR_24:%.*]] = alloca i32, align 4 +// CHECK12-NEXT: [[N_CASTED16:%.*]] = alloca i32, align 4 +// CHECK12-NEXT: 
[[DOTOFFLOAD_BASEPTRS17:%.*]] = alloca [3 x i8*], align 4 +// CHECK12-NEXT: [[DOTOFFLOAD_PTRS18:%.*]] = alloca [3 x i8*], align 4 +// CHECK12-NEXT: [[DOTOFFLOAD_MAPPERS19:%.*]] = alloca [3 x i8*], align 4 +// CHECK12-NEXT: [[_TMP20:%.*]] = alloca i32, align 4 +// CHECK12-NEXT: [[DOTCAPTURE_EXPR_21:%.*]] = alloca i32, align 4 +// CHECK12-NEXT: [[DOTCAPTURE_EXPR_22:%.*]] = alloca i32, align 4 // CHECK12-NEXT: store i32 0, i32* [[RETVAL]], align 4 // CHECK12-NEXT: store i32 [[ARGC]], i32* [[ARGC_ADDR]], align 4 // CHECK12-NEXT: store i8** [[ARGV]], i8*** [[ARGV_ADDR]], align 4 @@ -5039,170 +4973,152 @@ // CHECK12-NEXT: [[TMP8:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK12-NEXT: [[TMP9:%.*]] = bitcast i8** [[TMP8]] to i32* // CHECK12-NEXT: store i32 [[TMP3]], i32* [[TMP9]], align 4 -// CHECK12-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK12-NEXT: store i64 4, i64* [[TMP10]], align 4 -// CHECK12-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK12-NEXT: store i8* null, i8** [[TMP11]], align 4 -// CHECK12-NEXT: [[TMP12:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK12-NEXT: [[TMP13:%.*]] = bitcast i8** [[TMP12]] to i32* -// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP13]], align 4 -// CHECK12-NEXT: [[TMP14:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK12-NEXT: [[TMP15:%.*]] = bitcast i8** [[TMP14]] to i32* -// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP15]], align 4 -// CHECK12-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK12-NEXT: store i64 4, i64* [[TMP16]], align 4 -// CHECK12-NEXT: [[TMP17:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK12-NEXT: store i8* null, i8** [[TMP17]], align 4 -// CHECK12-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK12-NEXT: [[TMP10:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK12-NEXT: store i8* null, i8** [[TMP10]], align 4 +// CHECK12-NEXT: [[TMP11:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK12-NEXT: [[TMP12:%.*]] = bitcast i8** [[TMP11]] to i32* +// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP12]], align 4 +// CHECK12-NEXT: [[TMP13:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK12-NEXT: [[TMP14:%.*]] = bitcast i8** [[TMP13]] to i32* +// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP14]], align 4 +// CHECK12-NEXT: [[TMP15:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK12-NEXT: store i8* null, i8** [[TMP15]], align 4 +// CHECK12-NEXT: [[TMP16:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK12-NEXT: [[TMP17:%.*]] = bitcast i8** [[TMP16]] to i32** +// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP17]], align 4 +// CHECK12-NEXT: [[TMP18:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK12-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to i32** // CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP19]], align 4 -// CHECK12-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, 
i32 2 -// CHECK12-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i32** -// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP21]], align 4 -// CHECK12-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK12-NEXT: store i64 [[TMP5]], i64* [[TMP22]], align 4 -// CHECK12-NEXT: [[TMP23:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK12-NEXT: store i8* null, i8** [[TMP23]], align 4 -// CHECK12-NEXT: [[TMP24:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP25:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP26:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP27:%.*]] = load i32, i32* [[N]], align 4 -// CHECK12-NEXT: store i32 [[TMP27]], i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK12-NEXT: [[TMP28:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 -// CHECK12-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP28]], 0 +// CHECK12-NEXT: store i64 [[TMP5]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 2), align 4 +// CHECK12-NEXT: [[TMP20:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK12-NEXT: store i8* null, i8** [[TMP20]], align 4 +// CHECK12-NEXT: [[TMP21:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP22:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP23:%.*]] = load i32, i32* [[N]], align 4 +// CHECK12-NEXT: store i32 [[TMP23]], i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK12-NEXT: [[TMP24:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 +// CHECK12-NEXT: [[SUB:%.*]] = sub nsw i32 [[TMP24]], 0 // CHECK12-NEXT: [[DIV:%.*]] = sdiv i32 [[SUB]], 1 // CHECK12-NEXT: [[SUB2:%.*]] = sub nsw i32 [[DIV]], 1 // CHECK12-NEXT: store i32 [[SUB2]], i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK12-NEXT: [[TMP29:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 -// CHECK12-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP29]], 1 -// CHECK12-NEXT: [[TMP30:%.*]] = zext i32 [[ADD]] to i64 -// CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[TMP30]]) -// CHECK12-NEXT: [[TMP31:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l100.region_id, i32 3, i8** [[TMP24]], i8** [[TMP25]], i64* [[TMP26]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) -// CHECK12-NEXT: [[TMP32:%.*]] = icmp ne i32 [[TMP31]], 0 -// CHECK12-NEXT: br i1 [[TMP32]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK12-NEXT: [[TMP25:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_1]], align 4 +// CHECK12-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP25]], 1 +// CHECK12-NEXT: [[TMP26:%.*]] = zext i32 [[ADD]] to i64 +// CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2:[0-9]+]], i64 -1, i64 [[TMP26]]) +// CHECK12-NEXT: [[TMP27:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l100.region_id, i32 3, i8** [[TMP21]], i8** [[TMP22]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr 
inbounds ([3 x i64], [3 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK12-NEXT: [[TMP28:%.*]] = icmp ne i32 [[TMP27]], 0 +// CHECK12-NEXT: br i1 [[TMP28]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK12: omp_offload.failed: // CHECK12-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l100(i32 [[TMP3]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3:[0-9]+]] // CHECK12-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK12: omp_offload.cont: -// CHECK12-NEXT: [[TMP33:%.*]] = load i32, i32* [[N]], align 4 -// CHECK12-NEXT: store i32 [[TMP33]], i32* [[N_CASTED3]], align 4 -// CHECK12-NEXT: [[TMP34:%.*]] = load i32, i32* [[N_CASTED3]], align 4 -// CHECK12-NEXT: [[TMP35:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK12-NEXT: [[TMP36:%.*]] = sext i32 [[TMP35]] to i64 -// CHECK12-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i32* -// CHECK12-NEXT: store i32 [[TMP34]], i32* [[TMP38]], align 4 -// CHECK12-NEXT: [[TMP39:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i32* -// CHECK12-NEXT: store i32 [[TMP34]], i32* [[TMP40]], align 4 -// CHECK12-NEXT: [[TMP41:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 0 -// CHECK12-NEXT: store i64 4, i64* [[TMP41]], align 4 -// CHECK12-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP29:%.*]] = load i32, i32* [[N]], align 4 +// CHECK12-NEXT: store i32 [[TMP29]], i32* [[N_CASTED3]], align 4 +// CHECK12-NEXT: [[TMP30:%.*]] = load i32, i32* [[N_CASTED3]], align 4 +// CHECK12-NEXT: [[TMP31:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK12-NEXT: [[TMP32:%.*]] = sext i32 [[TMP31]] to i64 +// CHECK12-NEXT: [[TMP33:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i32* +// CHECK12-NEXT: store i32 [[TMP30]], i32* [[TMP34]], align 4 +// CHECK12-NEXT: [[TMP35:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP36:%.*]] = bitcast i8** [[TMP35]] to i32* +// CHECK12-NEXT: store i32 [[TMP30]], i32* [[TMP36]], align 4 +// CHECK12-NEXT: [[TMP37:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 0 +// CHECK12-NEXT: store i8* null, i8** [[TMP37]], align 4 +// CHECK12-NEXT: [[TMP38:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1 +// CHECK12-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i32* +// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP39]], align 4 +// CHECK12-NEXT: [[TMP40:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 1 +// CHECK12-NEXT: [[TMP41:%.*]] = bitcast i8** [[TMP40]] to i32* +// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP41]], align 4 +// CHECK12-NEXT: [[TMP42:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1 // CHECK12-NEXT: store i8* null, i8** [[TMP42]], align 4 -// CHECK12-NEXT: [[TMP43:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 1 -// CHECK12-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32* -// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP44]], align 4 -// CHECK12-NEXT: [[TMP45:%.*]] = getelementptr inbounds [3 
x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 1 -// CHECK12-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32* -// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP46]], align 4 -// CHECK12-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 1 -// CHECK12-NEXT: store i64 4, i64* [[TMP47]], align 4 -// CHECK12-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 1 -// CHECK12-NEXT: store i8* null, i8** [[TMP48]], align 4 -// CHECK12-NEXT: [[TMP49:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2 -// CHECK12-NEXT: [[TMP50:%.*]] = bitcast i8** [[TMP49]] to i32** -// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP50]], align 4 -// CHECK12-NEXT: [[TMP51:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 2 -// CHECK12-NEXT: [[TMP52:%.*]] = bitcast i8** [[TMP51]] to i32** -// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP52]], align 4 -// CHECK12-NEXT: [[TMP53:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 2 -// CHECK12-NEXT: store i64 [[TMP36]], i64* [[TMP53]], align 4 -// CHECK12-NEXT: [[TMP54:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 2 -// CHECK12-NEXT: store i8* null, i8** [[TMP54]], align 4 -// CHECK12-NEXT: [[TMP55:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP56:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP57:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES7]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP58:%.*]] = load i32, i32* [[N]], align 4 -// CHECK12-NEXT: store i32 [[TMP58]], i32* [[DOTCAPTURE_EXPR_9]], align 4 -// CHECK12-NEXT: [[TMP59:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 -// CHECK12-NEXT: [[SUB11:%.*]] = sub nsw i32 [[TMP59]], 0 -// CHECK12-NEXT: [[DIV12:%.*]] = sdiv i32 [[SUB11]], 1 -// CHECK12-NEXT: [[SUB13:%.*]] = sub nsw i32 [[DIV12]], 1 -// CHECK12-NEXT: store i32 [[SUB13]], i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK12-NEXT: [[TMP60:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_10]], align 4 -// CHECK12-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP60]], 1 -// CHECK12-NEXT: [[TMP61:%.*]] = zext i32 [[ADD14]] to i64 -// CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP61]]) -// CHECK12-NEXT: [[TMP62:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l105.region_id, i32 3, i8** [[TMP55]], i8** [[TMP56]], i64* [[TMP57]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.2, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) -// CHECK12-NEXT: [[TMP63:%.*]] = icmp ne i32 [[TMP62]], 0 -// CHECK12-NEXT: br i1 [[TMP63]], label [[OMP_OFFLOAD_FAILED15:%.*]], label [[OMP_OFFLOAD_CONT16:%.*]] -// CHECK12: omp_offload.failed15: -// CHECK12-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l105(i32 [[TMP34]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] -// CHECK12-NEXT: br label [[OMP_OFFLOAD_CONT16]] -// CHECK12: omp_offload.cont16: -// CHECK12-NEXT: [[TMP64:%.*]] = load i32, i32* [[N]], align 4 -// CHECK12-NEXT: store i32 [[TMP64]], i32* [[N_CASTED17]], align 4 -// CHECK12-NEXT: [[TMP65:%.*]] = load i32, i32* [[N_CASTED17]], align 4 -// CHECK12-NEXT: [[TMP66:%.*]] = mul nuw i32 [[TMP0]], 4 -// CHECK12-NEXT: 
[[TMP67:%.*]] = sext i32 [[TMP66]] to i64 -// CHECK12-NEXT: [[TMP68:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP69:%.*]] = bitcast i8** [[TMP68]] to i32* -// CHECK12-NEXT: store i32 [[TMP65]], i32* [[TMP69]], align 4 -// CHECK12-NEXT: [[TMP70:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP71:%.*]] = bitcast i8** [[TMP70]] to i32* -// CHECK12-NEXT: store i32 [[TMP65]], i32* [[TMP71]], align 4 -// CHECK12-NEXT: [[TMP72:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES21]], i32 0, i32 0 -// CHECK12-NEXT: store i64 4, i64* [[TMP72]], align 4 -// CHECK12-NEXT: [[TMP73:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 0 -// CHECK12-NEXT: store i8* null, i8** [[TMP73]], align 4 -// CHECK12-NEXT: [[TMP74:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 1 -// CHECK12-NEXT: [[TMP75:%.*]] = bitcast i8** [[TMP74]] to i32* -// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP75]], align 4 -// CHECK12-NEXT: [[TMP76:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 1 -// CHECK12-NEXT: [[TMP77:%.*]] = bitcast i8** [[TMP76]] to i32* -// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP77]], align 4 -// CHECK12-NEXT: [[TMP78:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES21]], i32 0, i32 1 -// CHECK12-NEXT: store i64 4, i64* [[TMP78]], align 4 -// CHECK12-NEXT: [[TMP79:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 1 -// CHECK12-NEXT: store i8* null, i8** [[TMP79]], align 4 -// CHECK12-NEXT: [[TMP80:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 2 -// CHECK12-NEXT: [[TMP81:%.*]] = bitcast i8** [[TMP80]] to i32** -// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP81]], align 4 -// CHECK12-NEXT: [[TMP82:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 2 -// CHECK12-NEXT: [[TMP83:%.*]] = bitcast i8** [[TMP82]] to i32** -// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP83]], align 4 -// CHECK12-NEXT: [[TMP84:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES21]], i32 0, i32 2 -// CHECK12-NEXT: store i64 [[TMP67]], i64* [[TMP84]], align 4 -// CHECK12-NEXT: [[TMP85:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS20]], i32 0, i32 2 -// CHECK12-NEXT: store i8* null, i8** [[TMP85]], align 4 -// CHECK12-NEXT: [[TMP86:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS18]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP87:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS19]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP88:%.*]] = getelementptr inbounds [3 x i64], [3 x i64]* [[DOTOFFLOAD_SIZES21]], i32 0, i32 0 -// CHECK12-NEXT: [[TMP89:%.*]] = load i32, i32* [[N]], align 4 -// CHECK12-NEXT: store i32 [[TMP89]], i32* [[DOTCAPTURE_EXPR_23]], align 4 -// CHECK12-NEXT: [[TMP90:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_23]], align 4 -// CHECK12-NEXT: [[SUB25:%.*]] = sub nsw i32 [[TMP90]], 0 -// CHECK12-NEXT: [[DIV26:%.*]] = sdiv i32 [[SUB25]], 1 -// CHECK12-NEXT: [[SUB27:%.*]] = sub nsw i32 [[DIV26]], 1 -// CHECK12-NEXT: store i32 [[SUB27]], i32* [[DOTCAPTURE_EXPR_24]], align 4 -// CHECK12-NEXT: [[TMP91:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_24]], align 4 -// CHECK12-NEXT: [[ADD28:%.*]] = add nsw i32 [[TMP91]], 1 -// CHECK12-NEXT: [[TMP92:%.*]] 
= zext i32 [[ADD28]] to i64 -// CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP92]]) -// CHECK12-NEXT: [[TMP93:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l110.region_id, i32 3, i8** [[TMP86]], i8** [[TMP87]], i64* [[TMP88]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.4, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) -// CHECK12-NEXT: [[TMP94:%.*]] = icmp ne i32 [[TMP93]], 0 -// CHECK12-NEXT: br i1 [[TMP94]], label [[OMP_OFFLOAD_FAILED29:%.*]], label [[OMP_OFFLOAD_CONT30:%.*]] -// CHECK12: omp_offload.failed29: -// CHECK12-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l110(i32 [[TMP65]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] -// CHECK12-NEXT: br label [[OMP_OFFLOAD_CONT30]] -// CHECK12: omp_offload.cont30: -// CHECK12-NEXT: [[TMP95:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 -// CHECK12-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP95]]) +// CHECK12-NEXT: [[TMP43:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 2 +// CHECK12-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32** +// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP44]], align 4 +// CHECK12-NEXT: [[TMP45:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 2 +// CHECK12-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32** +// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP46]], align 4 +// CHECK12-NEXT: store i64 [[TMP32]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.2, i32 0, i32 2), align 4 +// CHECK12-NEXT: [[TMP47:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS6]], i32 0, i32 2 +// CHECK12-NEXT: store i8* null, i8** [[TMP47]], align 4 +// CHECK12-NEXT: [[TMP48:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS4]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP49:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS5]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP50:%.*]] = load i32, i32* [[N]], align 4 +// CHECK12-NEXT: store i32 [[TMP50]], i32* [[DOTCAPTURE_EXPR_8]], align 4 +// CHECK12-NEXT: [[TMP51:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_8]], align 4 +// CHECK12-NEXT: [[SUB10:%.*]] = sub nsw i32 [[TMP51]], 0 +// CHECK12-NEXT: [[DIV11:%.*]] = sdiv i32 [[SUB10]], 1 +// CHECK12-NEXT: [[SUB12:%.*]] = sub nsw i32 [[DIV11]], 1 +// CHECK12-NEXT: store i32 [[SUB12]], i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK12-NEXT: [[TMP52:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_9]], align 4 +// CHECK12-NEXT: [[ADD13:%.*]] = add nsw i32 [[TMP52]], 1 +// CHECK12-NEXT: [[TMP53:%.*]] = zext i32 [[ADD13]] to i64 +// CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP53]]) +// CHECK12-NEXT: [[TMP54:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l105.region_id, i32 3, i8** [[TMP48]], i8** [[TMP49]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.2, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.3, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK12-NEXT: [[TMP55:%.*]] = icmp ne i32 [[TMP54]], 0 +// CHECK12-NEXT: br i1 [[TMP55]], label [[OMP_OFFLOAD_FAILED14:%.*]], label [[OMP_OFFLOAD_CONT15:%.*]] +// CHECK12: omp_offload.failed14: +// CHECK12-NEXT: call void 
@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l105(i32 [[TMP30]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] +// CHECK12-NEXT: br label [[OMP_OFFLOAD_CONT15]] +// CHECK12: omp_offload.cont15: +// CHECK12-NEXT: [[TMP56:%.*]] = load i32, i32* [[N]], align 4 +// CHECK12-NEXT: store i32 [[TMP56]], i32* [[N_CASTED16]], align 4 +// CHECK12-NEXT: [[TMP57:%.*]] = load i32, i32* [[N_CASTED16]], align 4 +// CHECK12-NEXT: [[TMP58:%.*]] = mul nuw i32 [[TMP0]], 4 +// CHECK12-NEXT: [[TMP59:%.*]] = sext i32 [[TMP58]] to i64 +// CHECK12-NEXT: [[TMP60:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP61:%.*]] = bitcast i8** [[TMP60]] to i32* +// CHECK12-NEXT: store i32 [[TMP57]], i32* [[TMP61]], align 4 +// CHECK12-NEXT: [[TMP62:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP63:%.*]] = bitcast i8** [[TMP62]] to i32* +// CHECK12-NEXT: store i32 [[TMP57]], i32* [[TMP63]], align 4 +// CHECK12-NEXT: [[TMP64:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 0 +// CHECK12-NEXT: store i8* null, i8** [[TMP64]], align 4 +// CHECK12-NEXT: [[TMP65:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 1 +// CHECK12-NEXT: [[TMP66:%.*]] = bitcast i8** [[TMP65]] to i32* +// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP66]], align 4 +// CHECK12-NEXT: [[TMP67:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 1 +// CHECK12-NEXT: [[TMP68:%.*]] = bitcast i8** [[TMP67]] to i32* +// CHECK12-NEXT: store i32 [[TMP0]], i32* [[TMP68]], align 4 +// CHECK12-NEXT: [[TMP69:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 1 +// CHECK12-NEXT: store i8* null, i8** [[TMP69]], align 4 +// CHECK12-NEXT: [[TMP70:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 2 +// CHECK12-NEXT: [[TMP71:%.*]] = bitcast i8** [[TMP70]] to i32** +// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP71]], align 4 +// CHECK12-NEXT: [[TMP72:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 2 +// CHECK12-NEXT: [[TMP73:%.*]] = bitcast i8** [[TMP72]] to i32** +// CHECK12-NEXT: store i32* [[VLA]], i32** [[TMP73]], align 4 +// CHECK12-NEXT: store i64 [[TMP59]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.5, i32 0, i32 2), align 4 +// CHECK12-NEXT: [[TMP74:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_MAPPERS19]], i32 0, i32 2 +// CHECK12-NEXT: store i8* null, i8** [[TMP74]], align 4 +// CHECK12-NEXT: [[TMP75:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_BASEPTRS17]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP76:%.*]] = getelementptr inbounds [3 x i8*], [3 x i8*]* [[DOTOFFLOAD_PTRS18]], i32 0, i32 0 +// CHECK12-NEXT: [[TMP77:%.*]] = load i32, i32* [[N]], align 4 +// CHECK12-NEXT: store i32 [[TMP77]], i32* [[DOTCAPTURE_EXPR_21]], align 4 +// CHECK12-NEXT: [[TMP78:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_21]], align 4 +// CHECK12-NEXT: [[SUB23:%.*]] = sub nsw i32 [[TMP78]], 0 +// CHECK12-NEXT: [[DIV24:%.*]] = sdiv i32 [[SUB23]], 1 +// CHECK12-NEXT: [[SUB25:%.*]] = sub nsw i32 [[DIV24]], 1 +// CHECK12-NEXT: store i32 [[SUB25]], i32* [[DOTCAPTURE_EXPR_22]], align 4 +// CHECK12-NEXT: [[TMP79:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_22]], align 4 +// CHECK12-NEXT: [[ADD26:%.*]] = add nsw i32 [[TMP79]], 1 +// CHECK12-NEXT: [[TMP80:%.*]] = zext i32 [[ADD26]] to i64 +// 
CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 [[TMP80]]) +// CHECK12-NEXT: [[TMP81:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l110.region_id, i32 3, i8** [[TMP75]], i8** [[TMP76]], i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_sizes.5, i32 0, i32 0), i64* getelementptr inbounds ([3 x i64], [3 x i64]* @.offload_maptypes.6, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK12-NEXT: [[TMP82:%.*]] = icmp ne i32 [[TMP81]], 0 +// CHECK12-NEXT: br i1 [[TMP82]], label [[OMP_OFFLOAD_FAILED27:%.*]], label [[OMP_OFFLOAD_CONT28:%.*]] +// CHECK12: omp_offload.failed27: +// CHECK12-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l110(i32 [[TMP57]], i32 [[TMP0]], i32* [[VLA]]) #[[ATTR3]] +// CHECK12-NEXT: br label [[OMP_OFFLOAD_CONT28]] +// CHECK12: omp_offload.cont28: +// CHECK12-NEXT: [[TMP83:%.*]] = load i32, i32* [[ARGC_ADDR]], align 4 +// CHECK12-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiLi10EEiT_(i32 noundef [[TMP83]]) // CHECK12-NEXT: store i32 [[CALL]], i32* [[RETVAL]], align 4 -// CHECK12-NEXT: [[TMP96:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 -// CHECK12-NEXT: call void @llvm.stackrestore(i8* [[TMP96]]) -// CHECK12-NEXT: [[TMP97:%.*]] = load i32, i32* [[RETVAL]], align 4 -// CHECK12-NEXT: ret i32 [[TMP97]] +// CHECK12-NEXT: [[TMP84:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4 +// CHECK12-NEXT: call void @llvm.stackrestore(i8* [[TMP84]]) +// CHECK12-NEXT: [[TMP85:%.*]] = load i32, i32* [[RETVAL]], align 4 +// CHECK12-NEXT: ret i32 [[TMP85]] // // // CHECK12-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l100 @@ -5465,11 +5381,11 @@ // CHECK12-NEXT: [[TMP3:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR_]], align 4 // CHECK12-NEXT: store i32 [[TMP3]], i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 // CHECK12-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTCAPTURE_EXPR__CASTED]], align 4 -// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32, i32*, i32)* @.omp_outlined..3 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP4]]) +// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, i32, i32*, i32)* @.omp_outlined..4 to void (i32*, i32*, ...)*), i32* [[N_ADDR]], i32 [[TMP0]], i32* [[TMP1]], i32 [[TMP4]]) // CHECK12-NEXT: ret void // // -// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..3 +// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..4 // CHECK12-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[N:%.*]], i32 noundef [[VLA:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[A:%.*]], i32 noundef [[DOTCAPTURE_EXPR_:%.*]]) #[[ATTR2]] { // CHECK12-NEXT: entry: // CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -5624,7 +5540,7 @@ // CHECK12-NEXT: [[TMP5:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 // CHECK12-NEXT: [[TMP6:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK12-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l79.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.6, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK12-NEXT: [[TMP7:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l79.region_id, i32 1, i8** [[TMP5]], i8** [[TMP6]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) // CHECK12-NEXT: [[TMP8:%.*]] = icmp ne i32 [[TMP7]], 0 // CHECK12-NEXT: br i1 [[TMP8]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK12: omp_offload.failed: @@ -5642,7 +5558,7 @@ // CHECK12-NEXT: [[TMP14:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS1]], i32 0, i32 0 // CHECK12-NEXT: [[TMP15:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS2]], i32 0, i32 0 // CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK12-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l84.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.8, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.9, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK12-NEXT: [[TMP16:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l84.region_id, i32 1, i8** [[TMP14]], i8** [[TMP15]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.11, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) // CHECK12-NEXT: [[TMP17:%.*]] = icmp ne i32 [[TMP16]], 0 // CHECK12-NEXT: br i1 [[TMP17]], label [[OMP_OFFLOAD_FAILED5:%.*]], label 
[[OMP_OFFLOAD_CONT6:%.*]] // CHECK12: omp_offload.failed5: @@ -5660,7 +5576,7 @@ // CHECK12-NEXT: [[TMP23:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_BASEPTRS7]], i32 0, i32 0 // CHECK12-NEXT: [[TMP24:%.*]] = getelementptr inbounds [1 x i8*], [1 x i8*]* [[DOTOFFLOAD_PTRS8]], i32 0, i32 0 // CHECK12-NEXT: call void @__kmpc_push_target_tripcount_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i64 10) -// CHECK12-NEXT: [[TMP25:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l89.region_id, i32 1, i8** [[TMP23]], i8** [[TMP24]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.11, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.12, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) +// CHECK12-NEXT: [[TMP25:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB2]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z5tmainIiLi10EEiT__l89.region_id, i32 1, i8** [[TMP23]], i8** [[TMP24]], i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_sizes.14, i32 0, i32 0), i64* getelementptr inbounds ([1 x i64], [1 x i64]* @.offload_maptypes.15, i32 0, i32 0), i8** null, i8** null, i32 0, i32 1) // CHECK12-NEXT: [[TMP26:%.*]] = icmp ne i32 [[TMP25]], 0 // CHECK12-NEXT: br i1 [[TMP26]], label [[OMP_OFFLOAD_FAILED11:%.*]], label [[OMP_OFFLOAD_CONT12:%.*]] // CHECK12: omp_offload.failed11: @@ -5676,11 +5592,11 @@ // CHECK12-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK12-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 // CHECK12-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 -// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..5 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..7 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK12-NEXT: ret void // // -// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..5 +// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..7 // CHECK12-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK12-NEXT: entry: // CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -5759,11 +5675,11 @@ // CHECK12-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK12-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 // CHECK12-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 -// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..7 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) 
@__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK12-NEXT: ret void // // -// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..7 +// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..10 // CHECK12-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK12-NEXT: entry: // CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 @@ -5842,11 +5758,11 @@ // CHECK12-NEXT: [[A_ADDR:%.*]] = alloca [10 x i32]*, align 4 // CHECK12-NEXT: store [10 x i32]* [[A]], [10 x i32]** [[A_ADDR]], align 4 // CHECK12-NEXT: [[TMP0:%.*]] = load [10 x i32]*, [10 x i32]** [[A_ADDR]], align 4 -// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..10 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) +// CHECK12-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_teams(%struct.ident_t* @[[GLOB2]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [10 x i32]*)* @.omp_outlined..13 to void (i32*, i32*, ...)*), [10 x i32]* [[TMP0]]) // CHECK12-NEXT: ret void // // -// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..10 +// CHECK12-LABEL: define {{[^@]+}}@.omp_outlined..13 // CHECK12-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [10 x i32]* noundef nonnull align 4 dereferenceable(40) [[A:%.*]]) #[[ATTR2]] { // CHECK12-NEXT: entry: // CHECK12-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 4 diff --git a/clang/test/OpenMP/teams_firstprivate_codegen.cpp b/clang/test/OpenMP/teams_firstprivate_codegen.cpp --- a/clang/test/OpenMP/teams_firstprivate_codegen.cpp +++ b/clang/test/OpenMP/teams_firstprivate_codegen.cpp @@ -3325,7 +3325,6 @@ // CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [8 x i8*], align 8 // CHECK17-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [8 x i8*], align 8 // CHECK17-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [8 x i8*], align 8 -// CHECK17-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [8 x i64], align 8 // CHECK17-NEXT: store float* [[A]], float** [[A_ADDR]], align 8 // CHECK17-NEXT: store %struct.St* [[S]], %struct.St** [[S_ADDR]], align 8 // CHECK17-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 @@ -3357,92 +3356,76 @@ // CHECK17-NEXT: [[TMP17:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK17-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to float** // CHECK17-NEXT: store float* [[TMP8]], float** [[TMP18]], align 8 -// CHECK17-NEXT: [[TMP19:%.*]] = getelementptr inbounds [8 x i64], [8 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK17-NEXT: store i64 0, i64* [[TMP19]], align 8 -// CHECK17-NEXT: [[TMP20:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK17-NEXT: store i8* null, i8** [[TMP20]], align 8 -// CHECK17-NEXT: [[TMP21:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK17-NEXT: [[TMP22:%.*]] = bitcast i8** [[TMP21]] to %struct.St** -// CHECK17-NEXT: store %struct.St* [[TMP9]], %struct.St** [[TMP22]], align 8 -// CHECK17-NEXT: [[TMP23:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// 
CHECK17-NEXT: [[TMP24:%.*]] = bitcast i8** [[TMP23]] to %struct.St** -// CHECK17-NEXT: store %struct.St* [[TMP9]], %struct.St** [[TMP24]], align 8 -// CHECK17-NEXT: [[TMP25:%.*]] = getelementptr inbounds [8 x i64], [8 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK17-NEXT: store i64 0, i64* [[TMP25]], align 8 -// CHECK17-NEXT: [[TMP26:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK17-NEXT: store i8* null, i8** [[TMP26]], align 8 -// CHECK17-NEXT: [[TMP27:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK17-NEXT: [[TMP19:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK17-NEXT: store i8* null, i8** [[TMP19]], align 8 +// CHECK17-NEXT: [[TMP20:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK17-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to %struct.St** +// CHECK17-NEXT: store %struct.St* [[TMP9]], %struct.St** [[TMP21]], align 8 +// CHECK17-NEXT: [[TMP22:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK17-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to %struct.St** +// CHECK17-NEXT: store %struct.St* [[TMP9]], %struct.St** [[TMP23]], align 8 +// CHECK17-NEXT: [[TMP24:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK17-NEXT: store i8* null, i8** [[TMP24]], align 8 +// CHECK17-NEXT: [[TMP25:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK17-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i64* +// CHECK17-NEXT: store i64 [[TMP1]], i64* [[TMP26]], align 8 +// CHECK17-NEXT: [[TMP27:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK17-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i64* // CHECK17-NEXT: store i64 [[TMP1]], i64* [[TMP28]], align 8 -// CHECK17-NEXT: [[TMP29:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK17-NEXT: [[TMP30:%.*]] = bitcast i8** [[TMP29]] to i64* -// CHECK17-NEXT: store i64 [[TMP1]], i64* [[TMP30]], align 8 -// CHECK17-NEXT: [[TMP31:%.*]] = getelementptr inbounds [8 x i64], [8 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK17-NEXT: store i64 8, i64* [[TMP31]], align 8 -// CHECK17-NEXT: [[TMP32:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK17-NEXT: store i8* null, i8** [[TMP32]], align 8 -// CHECK17-NEXT: [[TMP33:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK17-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to ppc_fp128** -// CHECK17-NEXT: store ppc_fp128* [[TMP10]], ppc_fp128** [[TMP34]], align 8 -// CHECK17-NEXT: [[TMP35:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK17-NEXT: [[TMP36:%.*]] = bitcast i8** [[TMP35]] to ppc_fp128** -// CHECK17-NEXT: store ppc_fp128* [[TMP10]], ppc_fp128** [[TMP36]], align 8 -// CHECK17-NEXT: [[TMP37:%.*]] = getelementptr inbounds [8 x i64], [8 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK17-NEXT: store i64 0, i64* [[TMP37]], align 8 -// CHECK17-NEXT: [[TMP38:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 -// CHECK17-NEXT: store i8* null, i8** [[TMP38]], align 8 -// CHECK17-NEXT: [[TMP39:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// 
CHECK17-NEXT: [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i64* -// CHECK17-NEXT: store i64 [[TMP3]], i64* [[TMP40]], align 8 -// CHECK17-NEXT: [[TMP41:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK17-NEXT: [[TMP42:%.*]] = bitcast i8** [[TMP41]] to i64* -// CHECK17-NEXT: store i64 [[TMP3]], i64* [[TMP42]], align 8 -// CHECK17-NEXT: [[TMP43:%.*]] = getelementptr inbounds [8 x i64], [8 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK17-NEXT: store i64 8, i64* [[TMP43]], align 8 -// CHECK17-NEXT: [[TMP44:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 +// CHECK17-NEXT: [[TMP29:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK17-NEXT: store i8* null, i8** [[TMP29]], align 8 +// CHECK17-NEXT: [[TMP30:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK17-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to ppc_fp128** +// CHECK17-NEXT: store ppc_fp128* [[TMP10]], ppc_fp128** [[TMP31]], align 8 +// CHECK17-NEXT: [[TMP32:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK17-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to ppc_fp128** +// CHECK17-NEXT: store ppc_fp128* [[TMP10]], ppc_fp128** [[TMP33]], align 8 +// CHECK17-NEXT: [[TMP34:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 +// CHECK17-NEXT: store i8* null, i8** [[TMP34]], align 8 +// CHECK17-NEXT: [[TMP35:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK17-NEXT: [[TMP36:%.*]] = bitcast i8** [[TMP35]] to i64* +// CHECK17-NEXT: store i64 [[TMP3]], i64* [[TMP36]], align 8 +// CHECK17-NEXT: [[TMP37:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK17-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i64* +// CHECK17-NEXT: store i64 [[TMP3]], i64* [[TMP38]], align 8 +// CHECK17-NEXT: [[TMP39:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 +// CHECK17-NEXT: store i8* null, i8** [[TMP39]], align 8 +// CHECK17-NEXT: [[TMP40:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 5 +// CHECK17-NEXT: [[TMP41:%.*]] = bitcast i8** [[TMP40]] to i64* +// CHECK17-NEXT: store i64 [[TMP5]], i64* [[TMP41]], align 8 +// CHECK17-NEXT: [[TMP42:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 5 +// CHECK17-NEXT: [[TMP43:%.*]] = bitcast i8** [[TMP42]] to i64* +// CHECK17-NEXT: store i64 [[TMP5]], i64* [[TMP43]], align 8 +// CHECK17-NEXT: [[TMP44:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 5 // CHECK17-NEXT: store i8* null, i8** [[TMP44]], align 8 -// CHECK17-NEXT: [[TMP45:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 5 -// CHECK17-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i64* -// CHECK17-NEXT: store i64 [[TMP5]], i64* [[TMP46]], align 8 -// CHECK17-NEXT: [[TMP47:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 5 -// CHECK17-NEXT: [[TMP48:%.*]] = bitcast i8** [[TMP47]] to i64* -// CHECK17-NEXT: store i64 [[TMP5]], i64* [[TMP48]], align 8 -// CHECK17-NEXT: [[TMP49:%.*]] = getelementptr inbounds [8 x i64], [8 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5 -// CHECK17-NEXT: store i64 8, i64* [[TMP49]], align 8 -// CHECK17-NEXT: [[TMP50:%.*]] = getelementptr inbounds [8 x 
i8*], [8 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 5 -// CHECK17-NEXT: store i8* null, i8** [[TMP50]], align 8 -// CHECK17-NEXT: [[TMP51:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 6 -// CHECK17-NEXT: [[TMP52:%.*]] = bitcast i8** [[TMP51]] to double** -// CHECK17-NEXT: store double* [[VLA]], double** [[TMP52]], align 8 -// CHECK17-NEXT: [[TMP53:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 6 -// CHECK17-NEXT: [[TMP54:%.*]] = bitcast i8** [[TMP53]] to double** -// CHECK17-NEXT: store double* [[VLA]], double** [[TMP54]], align 8 -// CHECK17-NEXT: [[TMP55:%.*]] = getelementptr inbounds [8 x i64], [8 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6 -// CHECK17-NEXT: store i64 [[TMP14]], i64* [[TMP55]], align 8 -// CHECK17-NEXT: [[TMP56:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 6 -// CHECK17-NEXT: store i8* null, i8** [[TMP56]], align 8 -// CHECK17-NEXT: [[TMP57:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 7 -// CHECK17-NEXT: [[TMP58:%.*]] = bitcast i8** [[TMP57]] to i64* -// CHECK17-NEXT: store i64 [[TMP12]], i64* [[TMP58]], align 8 -// CHECK17-NEXT: [[TMP59:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 7 -// CHECK17-NEXT: [[TMP60:%.*]] = bitcast i8** [[TMP59]] to i64* -// CHECK17-NEXT: store i64 [[TMP12]], i64* [[TMP60]], align 8 -// CHECK17-NEXT: [[TMP61:%.*]] = getelementptr inbounds [8 x i64], [8 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7 -// CHECK17-NEXT: store i64 4, i64* [[TMP61]], align 8 -// CHECK17-NEXT: [[TMP62:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 7 -// CHECK17-NEXT: store i8* null, i8** [[TMP62]], align 8 -// CHECK17-NEXT: [[TMP63:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP64:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP65:%.*]] = getelementptr inbounds [8 x i64], [8 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP66:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1:[0-9]+]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z10array_funcPfP2StiPg_l152.region_id, i32 8, i8** [[TMP63]], i8** [[TMP64]], i64* [[TMP65]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK17-NEXT: [[TMP67:%.*]] = icmp ne i32 [[TMP66]], 0 -// CHECK17-NEXT: br i1 [[TMP67]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK17-NEXT: [[TMP45:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 6 +// CHECK17-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to double** +// CHECK17-NEXT: store double* [[VLA]], double** [[TMP46]], align 8 +// CHECK17-NEXT: [[TMP47:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 6 +// CHECK17-NEXT: [[TMP48:%.*]] = bitcast i8** [[TMP47]] to double** +// CHECK17-NEXT: store double* [[VLA]], double** [[TMP48]], align 8 +// CHECK17-NEXT: store i64 [[TMP14]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @.offload_sizes, i32 0, i32 6), align 8 +// CHECK17-NEXT: [[TMP49:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 6 +// CHECK17-NEXT: store i8* null, i8** [[TMP49]], align 8 +// CHECK17-NEXT: [[TMP50:%.*]] = getelementptr inbounds [8 x 
i8*], [8 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 7 +// CHECK17-NEXT: [[TMP51:%.*]] = bitcast i8** [[TMP50]] to i64* +// CHECK17-NEXT: store i64 [[TMP12]], i64* [[TMP51]], align 8 +// CHECK17-NEXT: [[TMP52:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 7 +// CHECK17-NEXT: [[TMP53:%.*]] = bitcast i8** [[TMP52]] to i64* +// CHECK17-NEXT: store i64 [[TMP12]], i64* [[TMP53]], align 8 +// CHECK17-NEXT: [[TMP54:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 7 +// CHECK17-NEXT: store i8* null, i8** [[TMP54]], align 8 +// CHECK17-NEXT: [[TMP55:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP56:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP57:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1:[0-9]+]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z10array_funcPfP2StiPg_l152.region_id, i32 8, i8** [[TMP55]], i8** [[TMP56]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([8 x i64], [8 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK17-NEXT: [[TMP58:%.*]] = icmp ne i32 [[TMP57]], 0 +// CHECK17-NEXT: br i1 [[TMP58]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK17: omp_offload.failed: // CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z10array_funcPfP2StiPg_l152(float* [[TMP8]], %struct.St* [[TMP9]], i64 [[TMP1]], ppc_fp128* [[TMP10]], i64 [[TMP3]], i64 [[TMP5]], double* [[VLA]], i64 [[TMP12]]) #[[ATTR4:[0-9]+]] // CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK17: omp_offload.cont: -// CHECK17-NEXT: [[TMP68:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK17-NEXT: call void @llvm.stackrestore(i8* [[TMP68]]) +// CHECK17-NEXT: [[TMP59:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK17-NEXT: call void @llvm.stackrestore(i8* [[TMP59]]) // CHECK17-NEXT: ret void // // @@ -3544,7 +3527,6 @@ // CHECK17-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [10 x i8*], align 8 // CHECK17-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [10 x i8*], align 8 // CHECK17-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [10 x i8*], align 8 -// CHECK17-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [10 x i64], align 8 // CHECK17-NEXT: store %struct.St* [[THIS]], %struct.St** [[THIS_ADDR]], align 8 // CHECK17-NEXT: store %struct.St* [[S]], %struct.St** [[S_ADDR]], align 8 // CHECK17-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 @@ -3589,112 +3571,93 @@ // CHECK17-NEXT: [[TMP24:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK17-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to %struct.St** // CHECK17-NEXT: store %struct.St* [[TMP9]], %struct.St** [[TMP25]], align 8 -// CHECK17-NEXT: [[TMP26:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK17-NEXT: store i64 0, i64* [[TMP26]], align 8 -// CHECK17-NEXT: [[TMP27:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK17-NEXT: store i8* null, i8** [[TMP27]], align 8 -// CHECK17-NEXT: [[TMP28:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK17-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i64* -// CHECK17-NEXT: store i64 [[TMP1]], i64* [[TMP29]], align 8 -// CHECK17-NEXT: [[TMP30:%.*]] = getelementptr 
inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK17-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i64* -// CHECK17-NEXT: store i64 [[TMP1]], i64* [[TMP31]], align 8 -// CHECK17-NEXT: [[TMP32:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK17-NEXT: store i64 8, i64* [[TMP32]], align 8 -// CHECK17-NEXT: [[TMP33:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK17-NEXT: store i8* null, i8** [[TMP33]], align 8 -// CHECK17-NEXT: [[TMP34:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK17-NEXT: [[TMP26:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK17-NEXT: store i8* null, i8** [[TMP26]], align 8 +// CHECK17-NEXT: [[TMP27:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK17-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i64* +// CHECK17-NEXT: store i64 [[TMP1]], i64* [[TMP28]], align 8 +// CHECK17-NEXT: [[TMP29:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK17-NEXT: [[TMP30:%.*]] = bitcast i8** [[TMP29]] to i64* +// CHECK17-NEXT: store i64 [[TMP1]], i64* [[TMP30]], align 8 +// CHECK17-NEXT: [[TMP31:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK17-NEXT: store i8* null, i8** [[TMP31]], align 8 +// CHECK17-NEXT: [[TMP32:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK17-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to ppc_fp128** +// CHECK17-NEXT: store ppc_fp128* [[TMP10]], ppc_fp128** [[TMP33]], align 8 +// CHECK17-NEXT: [[TMP34:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK17-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to ppc_fp128** // CHECK17-NEXT: store ppc_fp128* [[TMP10]], ppc_fp128** [[TMP35]], align 8 -// CHECK17-NEXT: [[TMP36:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK17-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to ppc_fp128** -// CHECK17-NEXT: store ppc_fp128* [[TMP10]], ppc_fp128** [[TMP37]], align 8 -// CHECK17-NEXT: [[TMP38:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK17-NEXT: store i64 0, i64* [[TMP38]], align 8 -// CHECK17-NEXT: [[TMP39:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK17-NEXT: store i8* null, i8** [[TMP39]], align 8 -// CHECK17-NEXT: [[TMP40:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK17-NEXT: [[TMP41:%.*]] = bitcast i8** [[TMP40]] to i64* -// CHECK17-NEXT: store i64 [[TMP3]], i64* [[TMP41]], align 8 -// CHECK17-NEXT: [[TMP42:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK17-NEXT: [[TMP36:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK17-NEXT: store i8* null, i8** [[TMP36]], align 8 +// CHECK17-NEXT: [[TMP37:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK17-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i64* +// CHECK17-NEXT: store i64 [[TMP3]], i64* [[TMP38]], align 8 +// CHECK17-NEXT: [[TMP39:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// 
CHECK17-NEXT: [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i64* +// CHECK17-NEXT: store i64 [[TMP3]], i64* [[TMP40]], align 8 +// CHECK17-NEXT: [[TMP41:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 +// CHECK17-NEXT: store i8* null, i8** [[TMP41]], align 8 +// CHECK17-NEXT: [[TMP42:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 // CHECK17-NEXT: [[TMP43:%.*]] = bitcast i8** [[TMP42]] to i64* -// CHECK17-NEXT: store i64 [[TMP3]], i64* [[TMP43]], align 8 -// CHECK17-NEXT: [[TMP44:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK17-NEXT: store i64 8, i64* [[TMP44]], align 8 -// CHECK17-NEXT: [[TMP45:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 -// CHECK17-NEXT: store i8* null, i8** [[TMP45]], align 8 -// CHECK17-NEXT: [[TMP46:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK17-NEXT: [[TMP47:%.*]] = bitcast i8** [[TMP46]] to i64* -// CHECK17-NEXT: store i64 [[TMP5]], i64* [[TMP47]], align 8 -// CHECK17-NEXT: [[TMP48:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK17-NEXT: [[TMP49:%.*]] = bitcast i8** [[TMP48]] to i64* -// CHECK17-NEXT: store i64 [[TMP5]], i64* [[TMP49]], align 8 -// CHECK17-NEXT: [[TMP50:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK17-NEXT: store i64 8, i64* [[TMP50]], align 8 -// CHECK17-NEXT: [[TMP51:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 +// CHECK17-NEXT: store i64 [[TMP5]], i64* [[TMP43]], align 8 +// CHECK17-NEXT: [[TMP44:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK17-NEXT: [[TMP45:%.*]] = bitcast i8** [[TMP44]] to i64* +// CHECK17-NEXT: store i64 [[TMP5]], i64* [[TMP45]], align 8 +// CHECK17-NEXT: [[TMP46:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 +// CHECK17-NEXT: store i8* null, i8** [[TMP46]], align 8 +// CHECK17-NEXT: [[TMP47:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 5 +// CHECK17-NEXT: [[TMP48:%.*]] = bitcast i8** [[TMP47]] to double** +// CHECK17-NEXT: store double* [[VLA]], double** [[TMP48]], align 8 +// CHECK17-NEXT: [[TMP49:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 5 +// CHECK17-NEXT: [[TMP50:%.*]] = bitcast i8** [[TMP49]] to double** +// CHECK17-NEXT: store double* [[VLA]], double** [[TMP50]], align 8 +// CHECK17-NEXT: store i64 [[TMP14]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_sizes.2, i32 0, i32 5), align 8 +// CHECK17-NEXT: [[TMP51:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 5 // CHECK17-NEXT: store i8* null, i8** [[TMP51]], align 8 -// CHECK17-NEXT: [[TMP52:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 5 -// CHECK17-NEXT: [[TMP53:%.*]] = bitcast i8** [[TMP52]] to double** -// CHECK17-NEXT: store double* [[VLA]], double** [[TMP53]], align 8 -// CHECK17-NEXT: [[TMP54:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 5 -// CHECK17-NEXT: [[TMP55:%.*]] = bitcast i8** [[TMP54]] to double** -// CHECK17-NEXT: store double* [[VLA]], double** [[TMP55]], align 8 -// CHECK17-NEXT: [[TMP56:%.*]] = getelementptr inbounds [10 x i64], [10 x 
i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5 -// CHECK17-NEXT: store i64 [[TMP14]], i64* [[TMP56]], align 8 -// CHECK17-NEXT: [[TMP57:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 5 -// CHECK17-NEXT: store i8* null, i8** [[TMP57]], align 8 -// CHECK17-NEXT: [[TMP58:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 6 -// CHECK17-NEXT: [[TMP59:%.*]] = bitcast i8** [[TMP58]] to %struct.St** -// CHECK17-NEXT: store %struct.St* [[THIS1]], %struct.St** [[TMP59]], align 8 -// CHECK17-NEXT: [[TMP60:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 6 -// CHECK17-NEXT: [[TMP61:%.*]] = bitcast i8** [[TMP60]] to i32** -// CHECK17-NEXT: store i32* [[A3]], i32** [[TMP61]], align 8 -// CHECK17-NEXT: [[TMP62:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6 -// CHECK17-NEXT: store i64 [[TMP21]], i64* [[TMP62]], align 8 -// CHECK17-NEXT: [[TMP63:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 6 -// CHECK17-NEXT: store i8* null, i8** [[TMP63]], align 8 -// CHECK17-NEXT: [[TMP64:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 7 -// CHECK17-NEXT: [[TMP65:%.*]] = bitcast i8** [[TMP64]] to %struct.St** -// CHECK17-NEXT: store %struct.St* [[THIS1]], %struct.St** [[TMP65]], align 8 -// CHECK17-NEXT: [[TMP66:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 7 -// CHECK17-NEXT: [[TMP67:%.*]] = bitcast i8** [[TMP66]] to i32** -// CHECK17-NEXT: store i32* [[B2]], i32** [[TMP67]], align 8 -// CHECK17-NEXT: [[TMP68:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7 -// CHECK17-NEXT: store i64 4, i64* [[TMP68]], align 8 -// CHECK17-NEXT: [[TMP69:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 7 -// CHECK17-NEXT: store i8* null, i8** [[TMP69]], align 8 -// CHECK17-NEXT: [[TMP70:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 8 -// CHECK17-NEXT: [[TMP71:%.*]] = bitcast i8** [[TMP70]] to %struct.St** -// CHECK17-NEXT: store %struct.St* [[THIS1]], %struct.St** [[TMP71]], align 8 -// CHECK17-NEXT: [[TMP72:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 8 -// CHECK17-NEXT: [[TMP73:%.*]] = bitcast i8** [[TMP72]] to i32** -// CHECK17-NEXT: store i32* [[A3]], i32** [[TMP73]], align 8 -// CHECK17-NEXT: [[TMP74:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 8 -// CHECK17-NEXT: store i64 4, i64* [[TMP74]], align 8 -// CHECK17-NEXT: [[TMP75:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 8 -// CHECK17-NEXT: store i8* null, i8** [[TMP75]], align 8 -// CHECK17-NEXT: [[TMP76:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 9 -// CHECK17-NEXT: [[TMP77:%.*]] = bitcast i8** [[TMP76]] to i64* -// CHECK17-NEXT: store i64 [[TMP12]], i64* [[TMP77]], align 8 -// CHECK17-NEXT: [[TMP78:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 9 -// CHECK17-NEXT: [[TMP79:%.*]] = bitcast i8** [[TMP78]] to i64* -// CHECK17-NEXT: store i64 [[TMP12]], i64* [[TMP79]], align 8 -// CHECK17-NEXT: [[TMP80:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 9 -// CHECK17-NEXT: store i64 4, i64* [[TMP80]], align 8 -// CHECK17-NEXT: 
[[TMP81:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 9 -// CHECK17-NEXT: store i8* null, i8** [[TMP81]], align 8 -// CHECK17-NEXT: [[TMP82:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP83:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP84:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK17-NEXT: [[TMP85:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2St7St_funcEPS_iPg_l144.region_id, i32 10, i8** [[TMP82]], i8** [[TMP83]], i64* [[TMP84]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_maptypes.2, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK17-NEXT: [[TMP86:%.*]] = icmp ne i32 [[TMP85]], 0 -// CHECK17-NEXT: br i1 [[TMP86]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK17-NEXT: [[TMP52:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 6 +// CHECK17-NEXT: [[TMP53:%.*]] = bitcast i8** [[TMP52]] to %struct.St** +// CHECK17-NEXT: store %struct.St* [[THIS1]], %struct.St** [[TMP53]], align 8 +// CHECK17-NEXT: [[TMP54:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 6 +// CHECK17-NEXT: [[TMP55:%.*]] = bitcast i8** [[TMP54]] to i32** +// CHECK17-NEXT: store i32* [[A3]], i32** [[TMP55]], align 8 +// CHECK17-NEXT: store i64 [[TMP21]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_sizes.2, i32 0, i32 6), align 8 +// CHECK17-NEXT: [[TMP56:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 6 +// CHECK17-NEXT: store i8* null, i8** [[TMP56]], align 8 +// CHECK17-NEXT: [[TMP57:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 7 +// CHECK17-NEXT: [[TMP58:%.*]] = bitcast i8** [[TMP57]] to %struct.St** +// CHECK17-NEXT: store %struct.St* [[THIS1]], %struct.St** [[TMP58]], align 8 +// CHECK17-NEXT: [[TMP59:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 7 +// CHECK17-NEXT: [[TMP60:%.*]] = bitcast i8** [[TMP59]] to i32** +// CHECK17-NEXT: store i32* [[B2]], i32** [[TMP60]], align 8 +// CHECK17-NEXT: [[TMP61:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 7 +// CHECK17-NEXT: store i8* null, i8** [[TMP61]], align 8 +// CHECK17-NEXT: [[TMP62:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 8 +// CHECK17-NEXT: [[TMP63:%.*]] = bitcast i8** [[TMP62]] to %struct.St** +// CHECK17-NEXT: store %struct.St* [[THIS1]], %struct.St** [[TMP63]], align 8 +// CHECK17-NEXT: [[TMP64:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 8 +// CHECK17-NEXT: [[TMP65:%.*]] = bitcast i8** [[TMP64]] to i32** +// CHECK17-NEXT: store i32* [[A3]], i32** [[TMP65]], align 8 +// CHECK17-NEXT: [[TMP66:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 8 +// CHECK17-NEXT: store i8* null, i8** [[TMP66]], align 8 +// CHECK17-NEXT: [[TMP67:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 9 +// CHECK17-NEXT: [[TMP68:%.*]] = bitcast i8** [[TMP67]] to i64* +// CHECK17-NEXT: store i64 [[TMP12]], i64* [[TMP68]], align 8 +// CHECK17-NEXT: [[TMP69:%.*]] = getelementptr inbounds [10 x 
i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 9 +// CHECK17-NEXT: [[TMP70:%.*]] = bitcast i8** [[TMP69]] to i64* +// CHECK17-NEXT: store i64 [[TMP12]], i64* [[TMP70]], align 8 +// CHECK17-NEXT: [[TMP71:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 9 +// CHECK17-NEXT: store i8* null, i8** [[TMP71]], align 8 +// CHECK17-NEXT: [[TMP72:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP73:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK17-NEXT: [[TMP74:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2St7St_funcEPS_iPg_l144.region_id, i32 10, i8** [[TMP72]], i8** [[TMP73]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_sizes.2, i32 0, i32 0), i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_maptypes.3, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK17-NEXT: [[TMP75:%.*]] = icmp ne i32 [[TMP74]], 0 +// CHECK17-NEXT: br i1 [[TMP75]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK17: omp_offload.failed: // CHECK17-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2St7St_funcEPS_iPg_l144(%struct.St* [[TMP9]], i64 [[TMP1]], ppc_fp128* [[TMP10]], i64 [[TMP3]], i64 [[TMP5]], double* [[VLA]], %struct.St* [[THIS1]], i64 [[TMP12]]) #[[ATTR4]] // CHECK17-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK17: omp_offload.cont: -// CHECK17-NEXT: [[TMP87:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK17-NEXT: call void @llvm.stackrestore(i8* [[TMP87]]) +// CHECK17-NEXT: [[TMP76:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK17-NEXT: call void @llvm.stackrestore(i8* [[TMP76]]) // CHECK17-NEXT: ret void // // @@ -3817,7 +3780,6 @@ // CHECK18-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [8 x i8*], align 8 // CHECK18-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [8 x i8*], align 8 // CHECK18-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [8 x i8*], align 8 -// CHECK18-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [8 x i64], align 8 // CHECK18-NEXT: store float* [[A]], float** [[A_ADDR]], align 8 // CHECK18-NEXT: store %struct.St* [[S]], %struct.St** [[S_ADDR]], align 8 // CHECK18-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 @@ -3849,92 +3811,76 @@ // CHECK18-NEXT: [[TMP17:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK18-NEXT: [[TMP18:%.*]] = bitcast i8** [[TMP17]] to float** // CHECK18-NEXT: store float* [[TMP8]], float** [[TMP18]], align 8 -// CHECK18-NEXT: [[TMP19:%.*]] = getelementptr inbounds [8 x i64], [8 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK18-NEXT: store i64 0, i64* [[TMP19]], align 8 -// CHECK18-NEXT: [[TMP20:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK18-NEXT: store i8* null, i8** [[TMP20]], align 8 -// CHECK18-NEXT: [[TMP21:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK18-NEXT: [[TMP22:%.*]] = bitcast i8** [[TMP21]] to %struct.St** -// CHECK18-NEXT: store %struct.St* [[TMP9]], %struct.St** [[TMP22]], align 8 -// CHECK18-NEXT: [[TMP23:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK18-NEXT: [[TMP24:%.*]] = bitcast i8** [[TMP23]] to %struct.St** -// CHECK18-NEXT: store %struct.St* [[TMP9]], %struct.St** [[TMP24]], align 8 -// CHECK18-NEXT: [[TMP25:%.*]] = getelementptr 
inbounds [8 x i64], [8 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK18-NEXT: store i64 0, i64* [[TMP25]], align 8 -// CHECK18-NEXT: [[TMP26:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK18-NEXT: store i8* null, i8** [[TMP26]], align 8 -// CHECK18-NEXT: [[TMP27:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK18-NEXT: [[TMP19:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK18-NEXT: store i8* null, i8** [[TMP19]], align 8 +// CHECK18-NEXT: [[TMP20:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK18-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to %struct.St** +// CHECK18-NEXT: store %struct.St* [[TMP9]], %struct.St** [[TMP21]], align 8 +// CHECK18-NEXT: [[TMP22:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK18-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to %struct.St** +// CHECK18-NEXT: store %struct.St* [[TMP9]], %struct.St** [[TMP23]], align 8 +// CHECK18-NEXT: [[TMP24:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK18-NEXT: store i8* null, i8** [[TMP24]], align 8 +// CHECK18-NEXT: [[TMP25:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK18-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i64* +// CHECK18-NEXT: store i64 [[TMP1]], i64* [[TMP26]], align 8 +// CHECK18-NEXT: [[TMP27:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK18-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i64* // CHECK18-NEXT: store i64 [[TMP1]], i64* [[TMP28]], align 8 -// CHECK18-NEXT: [[TMP29:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK18-NEXT: [[TMP30:%.*]] = bitcast i8** [[TMP29]] to i64* -// CHECK18-NEXT: store i64 [[TMP1]], i64* [[TMP30]], align 8 -// CHECK18-NEXT: [[TMP31:%.*]] = getelementptr inbounds [8 x i64], [8 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK18-NEXT: store i64 8, i64* [[TMP31]], align 8 -// CHECK18-NEXT: [[TMP32:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK18-NEXT: store i8* null, i8** [[TMP32]], align 8 -// CHECK18-NEXT: [[TMP33:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK18-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to ppc_fp128** -// CHECK18-NEXT: store ppc_fp128* [[TMP10]], ppc_fp128** [[TMP34]], align 8 -// CHECK18-NEXT: [[TMP35:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK18-NEXT: [[TMP36:%.*]] = bitcast i8** [[TMP35]] to ppc_fp128** -// CHECK18-NEXT: store ppc_fp128* [[TMP10]], ppc_fp128** [[TMP36]], align 8 -// CHECK18-NEXT: [[TMP37:%.*]] = getelementptr inbounds [8 x i64], [8 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK18-NEXT: store i64 0, i64* [[TMP37]], align 8 -// CHECK18-NEXT: [[TMP38:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 -// CHECK18-NEXT: store i8* null, i8** [[TMP38]], align 8 -// CHECK18-NEXT: [[TMP39:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK18-NEXT: [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i64* -// CHECK18-NEXT: store i64 [[TMP3]], i64* [[TMP40]], align 8 -// CHECK18-NEXT: [[TMP41:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* 
[[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK18-NEXT: [[TMP42:%.*]] = bitcast i8** [[TMP41]] to i64* -// CHECK18-NEXT: store i64 [[TMP3]], i64* [[TMP42]], align 8 -// CHECK18-NEXT: [[TMP43:%.*]] = getelementptr inbounds [8 x i64], [8 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK18-NEXT: store i64 8, i64* [[TMP43]], align 8 -// CHECK18-NEXT: [[TMP44:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 +// CHECK18-NEXT: [[TMP29:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK18-NEXT: store i8* null, i8** [[TMP29]], align 8 +// CHECK18-NEXT: [[TMP30:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK18-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to ppc_fp128** +// CHECK18-NEXT: store ppc_fp128* [[TMP10]], ppc_fp128** [[TMP31]], align 8 +// CHECK18-NEXT: [[TMP32:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK18-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to ppc_fp128** +// CHECK18-NEXT: store ppc_fp128* [[TMP10]], ppc_fp128** [[TMP33]], align 8 +// CHECK18-NEXT: [[TMP34:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 +// CHECK18-NEXT: store i8* null, i8** [[TMP34]], align 8 +// CHECK18-NEXT: [[TMP35:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK18-NEXT: [[TMP36:%.*]] = bitcast i8** [[TMP35]] to i64* +// CHECK18-NEXT: store i64 [[TMP3]], i64* [[TMP36]], align 8 +// CHECK18-NEXT: [[TMP37:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK18-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i64* +// CHECK18-NEXT: store i64 [[TMP3]], i64* [[TMP38]], align 8 +// CHECK18-NEXT: [[TMP39:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 +// CHECK18-NEXT: store i8* null, i8** [[TMP39]], align 8 +// CHECK18-NEXT: [[TMP40:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 5 +// CHECK18-NEXT: [[TMP41:%.*]] = bitcast i8** [[TMP40]] to i64* +// CHECK18-NEXT: store i64 [[TMP5]], i64* [[TMP41]], align 8 +// CHECK18-NEXT: [[TMP42:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 5 +// CHECK18-NEXT: [[TMP43:%.*]] = bitcast i8** [[TMP42]] to i64* +// CHECK18-NEXT: store i64 [[TMP5]], i64* [[TMP43]], align 8 +// CHECK18-NEXT: [[TMP44:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 5 // CHECK18-NEXT: store i8* null, i8** [[TMP44]], align 8 -// CHECK18-NEXT: [[TMP45:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 5 -// CHECK18-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i64* -// CHECK18-NEXT: store i64 [[TMP5]], i64* [[TMP46]], align 8 -// CHECK18-NEXT: [[TMP47:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 5 -// CHECK18-NEXT: [[TMP48:%.*]] = bitcast i8** [[TMP47]] to i64* -// CHECK18-NEXT: store i64 [[TMP5]], i64* [[TMP48]], align 8 -// CHECK18-NEXT: [[TMP49:%.*]] = getelementptr inbounds [8 x i64], [8 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5 -// CHECK18-NEXT: store i64 8, i64* [[TMP49]], align 8 -// CHECK18-NEXT: [[TMP50:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 5 -// CHECK18-NEXT: store i8* null, i8** [[TMP50]], align 8 -// CHECK18-NEXT: [[TMP51:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* 
[[DOTOFFLOAD_BASEPTRS]], i32 0, i32 6 -// CHECK18-NEXT: [[TMP52:%.*]] = bitcast i8** [[TMP51]] to double** -// CHECK18-NEXT: store double* [[VLA]], double** [[TMP52]], align 8 -// CHECK18-NEXT: [[TMP53:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 6 -// CHECK18-NEXT: [[TMP54:%.*]] = bitcast i8** [[TMP53]] to double** -// CHECK18-NEXT: store double* [[VLA]], double** [[TMP54]], align 8 -// CHECK18-NEXT: [[TMP55:%.*]] = getelementptr inbounds [8 x i64], [8 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6 -// CHECK18-NEXT: store i64 [[TMP14]], i64* [[TMP55]], align 8 -// CHECK18-NEXT: [[TMP56:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 6 -// CHECK18-NEXT: store i8* null, i8** [[TMP56]], align 8 -// CHECK18-NEXT: [[TMP57:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 7 -// CHECK18-NEXT: [[TMP58:%.*]] = bitcast i8** [[TMP57]] to i64* -// CHECK18-NEXT: store i64 [[TMP12]], i64* [[TMP58]], align 8 -// CHECK18-NEXT: [[TMP59:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 7 -// CHECK18-NEXT: [[TMP60:%.*]] = bitcast i8** [[TMP59]] to i64* -// CHECK18-NEXT: store i64 [[TMP12]], i64* [[TMP60]], align 8 -// CHECK18-NEXT: [[TMP61:%.*]] = getelementptr inbounds [8 x i64], [8 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7 -// CHECK18-NEXT: store i64 4, i64* [[TMP61]], align 8 -// CHECK18-NEXT: [[TMP62:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 7 -// CHECK18-NEXT: store i8* null, i8** [[TMP62]], align 8 -// CHECK18-NEXT: [[TMP63:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP64:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP65:%.*]] = getelementptr inbounds [8 x i64], [8 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP66:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1:[0-9]+]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z10array_funcPfP2StiPg_l152.region_id, i32 8, i8** [[TMP63]], i8** [[TMP64]], i64* [[TMP65]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK18-NEXT: [[TMP67:%.*]] = icmp ne i32 [[TMP66]], 0 -// CHECK18-NEXT: br i1 [[TMP67]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK18-NEXT: [[TMP45:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 6 +// CHECK18-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to double** +// CHECK18-NEXT: store double* [[VLA]], double** [[TMP46]], align 8 +// CHECK18-NEXT: [[TMP47:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 6 +// CHECK18-NEXT: [[TMP48:%.*]] = bitcast i8** [[TMP47]] to double** +// CHECK18-NEXT: store double* [[VLA]], double** [[TMP48]], align 8 +// CHECK18-NEXT: store i64 [[TMP14]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @.offload_sizes, i32 0, i32 6), align 8 +// CHECK18-NEXT: [[TMP49:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 6 +// CHECK18-NEXT: store i8* null, i8** [[TMP49]], align 8 +// CHECK18-NEXT: [[TMP50:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 7 +// CHECK18-NEXT: [[TMP51:%.*]] = bitcast i8** [[TMP50]] to i64* +// CHECK18-NEXT: store i64 [[TMP12]], i64* [[TMP51]], align 8 +// 
CHECK18-NEXT: [[TMP52:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 7 +// CHECK18-NEXT: [[TMP53:%.*]] = bitcast i8** [[TMP52]] to i64* +// CHECK18-NEXT: store i64 [[TMP12]], i64* [[TMP53]], align 8 +// CHECK18-NEXT: [[TMP54:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 7 +// CHECK18-NEXT: store i8* null, i8** [[TMP54]], align 8 +// CHECK18-NEXT: [[TMP55:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP56:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP57:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1:[0-9]+]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z10array_funcPfP2StiPg_l152.region_id, i32 8, i8** [[TMP55]], i8** [[TMP56]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([8 x i64], [8 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK18-NEXT: [[TMP58:%.*]] = icmp ne i32 [[TMP57]], 0 +// CHECK18-NEXT: br i1 [[TMP58]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK18: omp_offload.failed: // CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z10array_funcPfP2StiPg_l152(float* [[TMP8]], %struct.St* [[TMP9]], i64 [[TMP1]], ppc_fp128* [[TMP10]], i64 [[TMP3]], i64 [[TMP5]], double* [[VLA]], i64 [[TMP12]]) #[[ATTR4:[0-9]+]] // CHECK18-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK18: omp_offload.cont: -// CHECK18-NEXT: [[TMP68:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK18-NEXT: call void @llvm.stackrestore(i8* [[TMP68]]) +// CHECK18-NEXT: [[TMP59:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK18-NEXT: call void @llvm.stackrestore(i8* [[TMP59]]) // CHECK18-NEXT: ret void // // @@ -4036,7 +3982,6 @@ // CHECK18-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [10 x i8*], align 8 // CHECK18-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [10 x i8*], align 8 // CHECK18-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [10 x i8*], align 8 -// CHECK18-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [10 x i64], align 8 // CHECK18-NEXT: store %struct.St* [[THIS]], %struct.St** [[THIS_ADDR]], align 8 // CHECK18-NEXT: store %struct.St* [[S]], %struct.St** [[S_ADDR]], align 8 // CHECK18-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 @@ -4081,112 +4026,93 @@ // CHECK18-NEXT: [[TMP24:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK18-NEXT: [[TMP25:%.*]] = bitcast i8** [[TMP24]] to %struct.St** // CHECK18-NEXT: store %struct.St* [[TMP9]], %struct.St** [[TMP25]], align 8 -// CHECK18-NEXT: [[TMP26:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK18-NEXT: store i64 0, i64* [[TMP26]], align 8 -// CHECK18-NEXT: [[TMP27:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 -// CHECK18-NEXT: store i8* null, i8** [[TMP27]], align 8 -// CHECK18-NEXT: [[TMP28:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK18-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i64* -// CHECK18-NEXT: store i64 [[TMP1]], i64* [[TMP29]], align 8 -// CHECK18-NEXT: [[TMP30:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK18-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i64* -// CHECK18-NEXT: store i64 [[TMP1]], i64* [[TMP31]], 
align 8 -// CHECK18-NEXT: [[TMP32:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK18-NEXT: store i64 8, i64* [[TMP32]], align 8 -// CHECK18-NEXT: [[TMP33:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 -// CHECK18-NEXT: store i8* null, i8** [[TMP33]], align 8 -// CHECK18-NEXT: [[TMP34:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK18-NEXT: [[TMP26:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 0 +// CHECK18-NEXT: store i8* null, i8** [[TMP26]], align 8 +// CHECK18-NEXT: [[TMP27:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK18-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i64* +// CHECK18-NEXT: store i64 [[TMP1]], i64* [[TMP28]], align 8 +// CHECK18-NEXT: [[TMP29:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK18-NEXT: [[TMP30:%.*]] = bitcast i8** [[TMP29]] to i64* +// CHECK18-NEXT: store i64 [[TMP1]], i64* [[TMP30]], align 8 +// CHECK18-NEXT: [[TMP31:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 1 +// CHECK18-NEXT: store i8* null, i8** [[TMP31]], align 8 +// CHECK18-NEXT: [[TMP32:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK18-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to ppc_fp128** +// CHECK18-NEXT: store ppc_fp128* [[TMP10]], ppc_fp128** [[TMP33]], align 8 +// CHECK18-NEXT: [[TMP34:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK18-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to ppc_fp128** // CHECK18-NEXT: store ppc_fp128* [[TMP10]], ppc_fp128** [[TMP35]], align 8 -// CHECK18-NEXT: [[TMP36:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK18-NEXT: [[TMP37:%.*]] = bitcast i8** [[TMP36]] to ppc_fp128** -// CHECK18-NEXT: store ppc_fp128* [[TMP10]], ppc_fp128** [[TMP37]], align 8 -// CHECK18-NEXT: [[TMP38:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK18-NEXT: store i64 0, i64* [[TMP38]], align 8 -// CHECK18-NEXT: [[TMP39:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 -// CHECK18-NEXT: store i8* null, i8** [[TMP39]], align 8 -// CHECK18-NEXT: [[TMP40:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK18-NEXT: [[TMP41:%.*]] = bitcast i8** [[TMP40]] to i64* -// CHECK18-NEXT: store i64 [[TMP3]], i64* [[TMP41]], align 8 -// CHECK18-NEXT: [[TMP42:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK18-NEXT: [[TMP36:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 2 +// CHECK18-NEXT: store i8* null, i8** [[TMP36]], align 8 +// CHECK18-NEXT: [[TMP37:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK18-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i64* +// CHECK18-NEXT: store i64 [[TMP3]], i64* [[TMP38]], align 8 +// CHECK18-NEXT: [[TMP39:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK18-NEXT: [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i64* +// CHECK18-NEXT: store i64 [[TMP3]], i64* [[TMP40]], align 8 +// CHECK18-NEXT: [[TMP41:%.*]] = getelementptr inbounds [10 x i8*], [10 x 
i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 +// CHECK18-NEXT: store i8* null, i8** [[TMP41]], align 8 +// CHECK18-NEXT: [[TMP42:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 // CHECK18-NEXT: [[TMP43:%.*]] = bitcast i8** [[TMP42]] to i64* -// CHECK18-NEXT: store i64 [[TMP3]], i64* [[TMP43]], align 8 -// CHECK18-NEXT: [[TMP44:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK18-NEXT: store i64 8, i64* [[TMP44]], align 8 -// CHECK18-NEXT: [[TMP45:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 3 -// CHECK18-NEXT: store i8* null, i8** [[TMP45]], align 8 -// CHECK18-NEXT: [[TMP46:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK18-NEXT: [[TMP47:%.*]] = bitcast i8** [[TMP46]] to i64* -// CHECK18-NEXT: store i64 [[TMP5]], i64* [[TMP47]], align 8 -// CHECK18-NEXT: [[TMP48:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK18-NEXT: [[TMP49:%.*]] = bitcast i8** [[TMP48]] to i64* -// CHECK18-NEXT: store i64 [[TMP5]], i64* [[TMP49]], align 8 -// CHECK18-NEXT: [[TMP50:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK18-NEXT: store i64 8, i64* [[TMP50]], align 8 -// CHECK18-NEXT: [[TMP51:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 +// CHECK18-NEXT: store i64 [[TMP5]], i64* [[TMP43]], align 8 +// CHECK18-NEXT: [[TMP44:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK18-NEXT: [[TMP45:%.*]] = bitcast i8** [[TMP44]] to i64* +// CHECK18-NEXT: store i64 [[TMP5]], i64* [[TMP45]], align 8 +// CHECK18-NEXT: [[TMP46:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 4 +// CHECK18-NEXT: store i8* null, i8** [[TMP46]], align 8 +// CHECK18-NEXT: [[TMP47:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 5 +// CHECK18-NEXT: [[TMP48:%.*]] = bitcast i8** [[TMP47]] to double** +// CHECK18-NEXT: store double* [[VLA]], double** [[TMP48]], align 8 +// CHECK18-NEXT: [[TMP49:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 5 +// CHECK18-NEXT: [[TMP50:%.*]] = bitcast i8** [[TMP49]] to double** +// CHECK18-NEXT: store double* [[VLA]], double** [[TMP50]], align 8 +// CHECK18-NEXT: store i64 [[TMP14]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_sizes.2, i32 0, i32 5), align 8 +// CHECK18-NEXT: [[TMP51:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 5 // CHECK18-NEXT: store i8* null, i8** [[TMP51]], align 8 -// CHECK18-NEXT: [[TMP52:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 5 -// CHECK18-NEXT: [[TMP53:%.*]] = bitcast i8** [[TMP52]] to double** -// CHECK18-NEXT: store double* [[VLA]], double** [[TMP53]], align 8 -// CHECK18-NEXT: [[TMP54:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 5 -// CHECK18-NEXT: [[TMP55:%.*]] = bitcast i8** [[TMP54]] to double** -// CHECK18-NEXT: store double* [[VLA]], double** [[TMP55]], align 8 -// CHECK18-NEXT: [[TMP56:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5 -// CHECK18-NEXT: store i64 [[TMP14]], i64* [[TMP56]], align 8 -// CHECK18-NEXT: [[TMP57:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* 
[[DOTOFFLOAD_MAPPERS]], i64 0, i64 5 -// CHECK18-NEXT: store i8* null, i8** [[TMP57]], align 8 -// CHECK18-NEXT: [[TMP58:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 6 -// CHECK18-NEXT: [[TMP59:%.*]] = bitcast i8** [[TMP58]] to %struct.St** -// CHECK18-NEXT: store %struct.St* [[THIS1]], %struct.St** [[TMP59]], align 8 -// CHECK18-NEXT: [[TMP60:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 6 -// CHECK18-NEXT: [[TMP61:%.*]] = bitcast i8** [[TMP60]] to i32** -// CHECK18-NEXT: store i32* [[A3]], i32** [[TMP61]], align 8 -// CHECK18-NEXT: [[TMP62:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6 -// CHECK18-NEXT: store i64 [[TMP21]], i64* [[TMP62]], align 8 -// CHECK18-NEXT: [[TMP63:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 6 -// CHECK18-NEXT: store i8* null, i8** [[TMP63]], align 8 -// CHECK18-NEXT: [[TMP64:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 7 -// CHECK18-NEXT: [[TMP65:%.*]] = bitcast i8** [[TMP64]] to %struct.St** -// CHECK18-NEXT: store %struct.St* [[THIS1]], %struct.St** [[TMP65]], align 8 -// CHECK18-NEXT: [[TMP66:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 7 -// CHECK18-NEXT: [[TMP67:%.*]] = bitcast i8** [[TMP66]] to i32** -// CHECK18-NEXT: store i32* [[B2]], i32** [[TMP67]], align 8 -// CHECK18-NEXT: [[TMP68:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7 -// CHECK18-NEXT: store i64 4, i64* [[TMP68]], align 8 -// CHECK18-NEXT: [[TMP69:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 7 -// CHECK18-NEXT: store i8* null, i8** [[TMP69]], align 8 -// CHECK18-NEXT: [[TMP70:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 8 -// CHECK18-NEXT: [[TMP71:%.*]] = bitcast i8** [[TMP70]] to %struct.St** -// CHECK18-NEXT: store %struct.St* [[THIS1]], %struct.St** [[TMP71]], align 8 -// CHECK18-NEXT: [[TMP72:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 8 -// CHECK18-NEXT: [[TMP73:%.*]] = bitcast i8** [[TMP72]] to i32** -// CHECK18-NEXT: store i32* [[A3]], i32** [[TMP73]], align 8 -// CHECK18-NEXT: [[TMP74:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 8 -// CHECK18-NEXT: store i64 4, i64* [[TMP74]], align 8 -// CHECK18-NEXT: [[TMP75:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 8 -// CHECK18-NEXT: store i8* null, i8** [[TMP75]], align 8 -// CHECK18-NEXT: [[TMP76:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 9 -// CHECK18-NEXT: [[TMP77:%.*]] = bitcast i8** [[TMP76]] to i64* -// CHECK18-NEXT: store i64 [[TMP12]], i64* [[TMP77]], align 8 -// CHECK18-NEXT: [[TMP78:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 9 -// CHECK18-NEXT: [[TMP79:%.*]] = bitcast i8** [[TMP78]] to i64* -// CHECK18-NEXT: store i64 [[TMP12]], i64* [[TMP79]], align 8 -// CHECK18-NEXT: [[TMP80:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 9 -// CHECK18-NEXT: store i64 4, i64* [[TMP80]], align 8 -// CHECK18-NEXT: [[TMP81:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 9 -// CHECK18-NEXT: store i8* null, i8** [[TMP81]], align 8 -// CHECK18-NEXT: [[TMP82:%.*]] 
= getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP83:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP84:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK18-NEXT: [[TMP85:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2St7St_funcEPS_iPg_l144.region_id, i32 10, i8** [[TMP82]], i8** [[TMP83]], i64* [[TMP84]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_maptypes.2, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK18-NEXT: [[TMP86:%.*]] = icmp ne i32 [[TMP85]], 0 -// CHECK18-NEXT: br i1 [[TMP86]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK18-NEXT: [[TMP52:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 6 +// CHECK18-NEXT: [[TMP53:%.*]] = bitcast i8** [[TMP52]] to %struct.St** +// CHECK18-NEXT: store %struct.St* [[THIS1]], %struct.St** [[TMP53]], align 8 +// CHECK18-NEXT: [[TMP54:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 6 +// CHECK18-NEXT: [[TMP55:%.*]] = bitcast i8** [[TMP54]] to i32** +// CHECK18-NEXT: store i32* [[A3]], i32** [[TMP55]], align 8 +// CHECK18-NEXT: store i64 [[TMP21]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_sizes.2, i32 0, i32 6), align 8 +// CHECK18-NEXT: [[TMP56:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 6 +// CHECK18-NEXT: store i8* null, i8** [[TMP56]], align 8 +// CHECK18-NEXT: [[TMP57:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 7 +// CHECK18-NEXT: [[TMP58:%.*]] = bitcast i8** [[TMP57]] to %struct.St** +// CHECK18-NEXT: store %struct.St* [[THIS1]], %struct.St** [[TMP58]], align 8 +// CHECK18-NEXT: [[TMP59:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 7 +// CHECK18-NEXT: [[TMP60:%.*]] = bitcast i8** [[TMP59]] to i32** +// CHECK18-NEXT: store i32* [[B2]], i32** [[TMP60]], align 8 +// CHECK18-NEXT: [[TMP61:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 7 +// CHECK18-NEXT: store i8* null, i8** [[TMP61]], align 8 +// CHECK18-NEXT: [[TMP62:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 8 +// CHECK18-NEXT: [[TMP63:%.*]] = bitcast i8** [[TMP62]] to %struct.St** +// CHECK18-NEXT: store %struct.St* [[THIS1]], %struct.St** [[TMP63]], align 8 +// CHECK18-NEXT: [[TMP64:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 8 +// CHECK18-NEXT: [[TMP65:%.*]] = bitcast i8** [[TMP64]] to i32** +// CHECK18-NEXT: store i32* [[A3]], i32** [[TMP65]], align 8 +// CHECK18-NEXT: [[TMP66:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 8 +// CHECK18-NEXT: store i8* null, i8** [[TMP66]], align 8 +// CHECK18-NEXT: [[TMP67:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 9 +// CHECK18-NEXT: [[TMP68:%.*]] = bitcast i8** [[TMP67]] to i64* +// CHECK18-NEXT: store i64 [[TMP12]], i64* [[TMP68]], align 8 +// CHECK18-NEXT: [[TMP69:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 9 +// CHECK18-NEXT: [[TMP70:%.*]] = bitcast i8** [[TMP69]] to i64* +// CHECK18-NEXT: store i64 [[TMP12]], i64* [[TMP70]], align 8 +// 
CHECK18-NEXT: [[TMP71:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS]], i64 0, i64 9 +// CHECK18-NEXT: store i8* null, i8** [[TMP71]], align 8 +// CHECK18-NEXT: [[TMP72:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP73:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 +// CHECK18-NEXT: [[TMP74:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2St7St_funcEPS_iPg_l144.region_id, i32 10, i8** [[TMP72]], i8** [[TMP73]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_sizes.2, i32 0, i32 0), i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_maptypes.3, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) +// CHECK18-NEXT: [[TMP75:%.*]] = icmp ne i32 [[TMP74]], 0 +// CHECK18-NEXT: br i1 [[TMP75]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] // CHECK18: omp_offload.failed: // CHECK18-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2St7St_funcEPS_iPg_l144(%struct.St* [[TMP9]], i64 [[TMP1]], ppc_fp128* [[TMP10]], i64 [[TMP3]], i64 [[TMP5]], double* [[VLA]], %struct.St* [[THIS1]], i64 [[TMP12]]) #[[ATTR4]] // CHECK18-NEXT: br label [[OMP_OFFLOAD_CONT]] // CHECK18: omp_offload.cont: -// CHECK18-NEXT: [[TMP87:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 -// CHECK18-NEXT: call void @llvm.stackrestore(i8* [[TMP87]]) +// CHECK18-NEXT: [[TMP76:%.*]] = load i8*, i8** [[SAVED_STACK]], align 8 +// CHECK18-NEXT: call void @llvm.stackrestore(i8* [[TMP76]]) // CHECK18-NEXT: ret void // // @@ -4309,7 +4235,6 @@ // CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [8 x i8*], align 4 // CHECK19-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [8 x i8*], align 4 // CHECK19-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [8 x i8*], align 4 -// CHECK19-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [8 x i64], align 4 // CHECK19-NEXT: store float* [[A]], float** [[A_ADDR]], align 4 // CHECK19-NEXT: store %struct.St* [[S]], %struct.St** [[S_ADDR]], align 4 // CHECK19-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4 @@ -4338,92 +4263,76 @@ // CHECK19-NEXT: [[TMP15:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 // CHECK19-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to float** // CHECK19-NEXT: store float* [[TMP5]], float** [[TMP16]], align 4 -// CHECK19-NEXT: [[TMP17:%.*]] = getelementptr inbounds [8 x i64], [8 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK19-NEXT: store i64 0, i64* [[TMP17]], align 4 -// CHECK19-NEXT: [[TMP18:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 -// CHECK19-NEXT: store i8* null, i8** [[TMP18]], align 4 -// CHECK19-NEXT: [[TMP19:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 -// CHECK19-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to %struct.St** -// CHECK19-NEXT: store %struct.St* [[TMP6]], %struct.St** [[TMP20]], align 4 -// CHECK19-NEXT: [[TMP21:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 -// CHECK19-NEXT: [[TMP22:%.*]] = bitcast i8** [[TMP21]] to %struct.St** -// CHECK19-NEXT: store %struct.St* [[TMP6]], %struct.St** [[TMP22]], align 4 -// CHECK19-NEXT: [[TMP23:%.*]] = getelementptr inbounds [8 x i64], [8 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1 -// CHECK19-NEXT: store i64 0, i64* [[TMP23]], align 4 -// CHECK19-NEXT: [[TMP24:%.*]] = getelementptr inbounds [8 x 
i8*], [8 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 -// CHECK19-NEXT: store i8* null, i8** [[TMP24]], align 4 -// CHECK19-NEXT: [[TMP25:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK19-NEXT: [[TMP17:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0 +// CHECK19-NEXT: store i8* null, i8** [[TMP17]], align 4 +// CHECK19-NEXT: [[TMP18:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1 +// CHECK19-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to %struct.St** +// CHECK19-NEXT: store %struct.St* [[TMP6]], %struct.St** [[TMP19]], align 4 +// CHECK19-NEXT: [[TMP20:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1 +// CHECK19-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to %struct.St** +// CHECK19-NEXT: store %struct.St* [[TMP6]], %struct.St** [[TMP21]], align 4 +// CHECK19-NEXT: [[TMP22:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1 +// CHECK19-NEXT: store i8* null, i8** [[TMP22]], align 4 +// CHECK19-NEXT: [[TMP23:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2 +// CHECK19-NEXT: [[TMP24:%.*]] = bitcast i8** [[TMP23]] to i32* +// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP24]], align 4 +// CHECK19-NEXT: [[TMP25:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 // CHECK19-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i32* // CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP26]], align 4 -// CHECK19-NEXT: [[TMP27:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2 -// CHECK19-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i32* -// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP28]], align 4 -// CHECK19-NEXT: [[TMP29:%.*]] = getelementptr inbounds [8 x i64], [8 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2 -// CHECK19-NEXT: store i64 4, i64* [[TMP29]], align 4 -// CHECK19-NEXT: [[TMP30:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 -// CHECK19-NEXT: store i8* null, i8** [[TMP30]], align 4 -// CHECK19-NEXT: [[TMP31:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 -// CHECK19-NEXT: [[TMP32:%.*]] = bitcast i8** [[TMP31]] to x86_fp80** -// CHECK19-NEXT: store x86_fp80* [[TMP7]], x86_fp80** [[TMP32]], align 4 -// CHECK19-NEXT: [[TMP33:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 -// CHECK19-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to x86_fp80** -// CHECK19-NEXT: store x86_fp80* [[TMP7]], x86_fp80** [[TMP34]], align 4 -// CHECK19-NEXT: [[TMP35:%.*]] = getelementptr inbounds [8 x i64], [8 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3 -// CHECK19-NEXT: store i64 0, i64* [[TMP35]], align 4 -// CHECK19-NEXT: [[TMP36:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 -// CHECK19-NEXT: store i8* null, i8** [[TMP36]], align 4 -// CHECK19-NEXT: [[TMP37:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 -// CHECK19-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i32* -// CHECK19-NEXT: store i32 [[TMP1]], i32* [[TMP38]], align 4 -// CHECK19-NEXT: [[TMP39:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 -// CHECK19-NEXT: [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i32* -// CHECK19-NEXT: store i32 [[TMP1]], i32* [[TMP40]], align 4 -// CHECK19-NEXT: [[TMP41:%.*]] = 
getelementptr inbounds [8 x i64], [8 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4 -// CHECK19-NEXT: store i64 4, i64* [[TMP41]], align 4 -// CHECK19-NEXT: [[TMP42:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 +// CHECK19-NEXT: [[TMP27:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2 +// CHECK19-NEXT: store i8* null, i8** [[TMP27]], align 4 +// CHECK19-NEXT: [[TMP28:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3 +// CHECK19-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to x86_fp80** +// CHECK19-NEXT: store x86_fp80* [[TMP7]], x86_fp80** [[TMP29]], align 4 +// CHECK19-NEXT: [[TMP30:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3 +// CHECK19-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to x86_fp80** +// CHECK19-NEXT: store x86_fp80* [[TMP7]], x86_fp80** [[TMP31]], align 4 +// CHECK19-NEXT: [[TMP32:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3 +// CHECK19-NEXT: store i8* null, i8** [[TMP32]], align 4 +// CHECK19-NEXT: [[TMP33:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4 +// CHECK19-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i32* +// CHECK19-NEXT: store i32 [[TMP1]], i32* [[TMP34]], align 4 +// CHECK19-NEXT: [[TMP35:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4 +// CHECK19-NEXT: [[TMP36:%.*]] = bitcast i8** [[TMP35]] to i32* +// CHECK19-NEXT: store i32 [[TMP1]], i32* [[TMP36]], align 4 +// CHECK19-NEXT: [[TMP37:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4 +// CHECK19-NEXT: store i8* null, i8** [[TMP37]], align 4 +// CHECK19-NEXT: [[TMP38:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 5 +// CHECK19-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i32* +// CHECK19-NEXT: store i32 [[TMP2]], i32* [[TMP39]], align 4 +// CHECK19-NEXT: [[TMP40:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 5 +// CHECK19-NEXT: [[TMP41:%.*]] = bitcast i8** [[TMP40]] to i32* +// CHECK19-NEXT: store i32 [[TMP2]], i32* [[TMP41]], align 4 +// CHECK19-NEXT: [[TMP42:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 5 // CHECK19-NEXT: store i8* null, i8** [[TMP42]], align 4 -// CHECK19-NEXT: [[TMP43:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 5 -// CHECK19-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32* -// CHECK19-NEXT: store i32 [[TMP2]], i32* [[TMP44]], align 4 -// CHECK19-NEXT: [[TMP45:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 5 -// CHECK19-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32* -// CHECK19-NEXT: store i32 [[TMP2]], i32* [[TMP46]], align 4 -// CHECK19-NEXT: [[TMP47:%.*]] = getelementptr inbounds [8 x i64], [8 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5 -// CHECK19-NEXT: store i64 4, i64* [[TMP47]], align 4 -// CHECK19-NEXT: [[TMP48:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 5 -// CHECK19-NEXT: store i8* null, i8** [[TMP48]], align 4 -// CHECK19-NEXT: [[TMP49:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 6 -// CHECK19-NEXT: [[TMP50:%.*]] = bitcast i8** [[TMP49]] to double** -// CHECK19-NEXT: store double* [[VLA]], double** [[TMP50]], align 4 -// CHECK19-NEXT: [[TMP51:%.*]] = 
getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 6 -// CHECK19-NEXT: [[TMP52:%.*]] = bitcast i8** [[TMP51]] to double** -// CHECK19-NEXT: store double* [[VLA]], double** [[TMP52]], align 4 -// CHECK19-NEXT: [[TMP53:%.*]] = getelementptr inbounds [8 x i64], [8 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6 -// CHECK19-NEXT: store i64 [[TMP12]], i64* [[TMP53]], align 4 -// CHECK19-NEXT: [[TMP54:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 6 -// CHECK19-NEXT: store i8* null, i8** [[TMP54]], align 4 -// CHECK19-NEXT: [[TMP55:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 7 -// CHECK19-NEXT: [[TMP56:%.*]] = bitcast i8** [[TMP55]] to i32* -// CHECK19-NEXT: store i32 [[TMP9]], i32* [[TMP56]], align 4 -// CHECK19-NEXT: [[TMP57:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 7 -// CHECK19-NEXT: [[TMP58:%.*]] = bitcast i8** [[TMP57]] to i32* -// CHECK19-NEXT: store i32 [[TMP9]], i32* [[TMP58]], align 4 -// CHECK19-NEXT: [[TMP59:%.*]] = getelementptr inbounds [8 x i64], [8 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7 -// CHECK19-NEXT: store i64 4, i64* [[TMP59]], align 4 -// CHECK19-NEXT: [[TMP60:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 7 -// CHECK19-NEXT: store i8* null, i8** [[TMP60]], align 4 -// CHECK19-NEXT: [[TMP61:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP62:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP63:%.*]] = getelementptr inbounds [8 x i64], [8 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0 -// CHECK19-NEXT: [[TMP64:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1:[0-9]+]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z10array_funcPfP2StiPe_l152.region_id, i32 8, i8** [[TMP61]], i8** [[TMP62]], i64* [[TMP63]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0) -// CHECK19-NEXT: [[TMP65:%.*]] = icmp ne i32 [[TMP64]], 0 -// CHECK19-NEXT: br i1 [[TMP65]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]] +// CHECK19-NEXT: [[TMP43:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 6 +// CHECK19-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to double** +// CHECK19-NEXT: store double* [[VLA]], double** [[TMP44]], align 4 +// CHECK19-NEXT: [[TMP45:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 6 +// CHECK19-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to double** +// CHECK19-NEXT: store double* [[VLA]], double** [[TMP46]], align 4 +// CHECK19-NEXT: store i64 [[TMP12]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @.offload_sizes, i32 0, i32 6), align 4 +// CHECK19-NEXT: [[TMP47:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 6 +// CHECK19-NEXT: store i8* null, i8** [[TMP47]], align 4 +// CHECK19-NEXT: [[TMP48:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 7 +// CHECK19-NEXT: [[TMP49:%.*]] = bitcast i8** [[TMP48]] to i32* +// CHECK19-NEXT: store i32 [[TMP9]], i32* [[TMP49]], align 4 +// CHECK19-NEXT: [[TMP50:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 7 +// CHECK19-NEXT: [[TMP51:%.*]] = bitcast i8** [[TMP50]] to i32* +// CHECK19-NEXT: store i32 [[TMP9]], i32* 
[[TMP51]], align 4
+// CHECK19-NEXT: [[TMP52:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 7
+// CHECK19-NEXT: store i8* null, i8** [[TMP52]], align 4
+// CHECK19-NEXT: [[TMP53:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
+// CHECK19-NEXT: [[TMP54:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
+// CHECK19-NEXT: [[TMP55:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1:[0-9]+]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z10array_funcPfP2StiPe_l152.region_id, i32 8, i8** [[TMP53]], i8** [[TMP54]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([8 x i64], [8 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
+// CHECK19-NEXT: [[TMP56:%.*]] = icmp ne i32 [[TMP55]], 0
+// CHECK19-NEXT: br i1 [[TMP56]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK19: omp_offload.failed:
// CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z10array_funcPfP2StiPe_l152(float* [[TMP5]], %struct.St* [[TMP6]], i32 [[TMP0]], x86_fp80* [[TMP7]], i32 [[TMP1]], i32 [[TMP2]], double* [[VLA]], i32 [[TMP9]]) #[[ATTR4:[0-9]+]]
// CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK19: omp_offload.cont:
-// CHECK19-NEXT: [[TMP66:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
-// CHECK19-NEXT: call void @llvm.stackrestore(i8* [[TMP66]])
+// CHECK19-NEXT: [[TMP57:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
+// CHECK19-NEXT: call void @llvm.stackrestore(i8* [[TMP57]])
// CHECK19-NEXT: ret void
//
//
@@ -4524,7 +4433,6 @@
// CHECK19-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [10 x i8*], align 4
// CHECK19-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [10 x i8*], align 4
// CHECK19-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [10 x i8*], align 4
-// CHECK19-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [10 x i64], align 4
// CHECK19-NEXT: store %struct.St* [[THIS]], %struct.St** [[THIS_ADDR]], align 4
// CHECK19-NEXT: store %struct.St* [[S]], %struct.St** [[S_ADDR]], align 4
// CHECK19-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
@@ -4566,112 +4474,93 @@
// CHECK19-NEXT: [[TMP22:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK19-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to %struct.St**
// CHECK19-NEXT: store %struct.St* [[TMP6]], %struct.St** [[TMP23]], align 4
-// CHECK19-NEXT: [[TMP24:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
-// CHECK19-NEXT: store i64 0, i64* [[TMP24]], align 4
-// CHECK19-NEXT: [[TMP25:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
-// CHECK19-NEXT: store i8* null, i8** [[TMP25]], align 4
-// CHECK19-NEXT: [[TMP26:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
-// CHECK19-NEXT: [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i32*
-// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP27]], align 4
-// CHECK19-NEXT: [[TMP28:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
-// CHECK19-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i32*
-// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP29]], align 4
-// CHECK19-NEXT: [[TMP30:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1
-// CHECK19-NEXT: store i64 4, i64* [[TMP30]], align 4
-// CHECK19-NEXT: [[TMP31:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
-// CHECK19-NEXT: store i8* null, i8** [[TMP31]], align 4
-// CHECK19-NEXT: [[TMP32:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
+// CHECK19-NEXT: [[TMP24:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
+// CHECK19-NEXT: store i8* null, i8** [[TMP24]], align 4
+// CHECK19-NEXT: [[TMP25:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
+// CHECK19-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i32*
+// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP26]], align 4
+// CHECK19-NEXT: [[TMP27:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
+// CHECK19-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i32*
+// CHECK19-NEXT: store i32 [[TMP0]], i32* [[TMP28]], align 4
+// CHECK19-NEXT: [[TMP29:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
+// CHECK19-NEXT: store i8* null, i8** [[TMP29]], align 4
+// CHECK19-NEXT: [[TMP30:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
+// CHECK19-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to x86_fp80**
+// CHECK19-NEXT: store x86_fp80* [[TMP7]], x86_fp80** [[TMP31]], align 4
+// CHECK19-NEXT: [[TMP32:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK19-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to x86_fp80**
// CHECK19-NEXT: store x86_fp80* [[TMP7]], x86_fp80** [[TMP33]], align 4
-// CHECK19-NEXT: [[TMP34:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
-// CHECK19-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to x86_fp80**
-// CHECK19-NEXT: store x86_fp80* [[TMP7]], x86_fp80** [[TMP35]], align 4
-// CHECK19-NEXT: [[TMP36:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2
-// CHECK19-NEXT: store i64 0, i64* [[TMP36]], align 4
-// CHECK19-NEXT: [[TMP37:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
-// CHECK19-NEXT: store i8* null, i8** [[TMP37]], align 4
-// CHECK19-NEXT: [[TMP38:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
-// CHECK19-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i32*
-// CHECK19-NEXT: store i32 [[TMP1]], i32* [[TMP39]], align 4
-// CHECK19-NEXT: [[TMP40:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
+// CHECK19-NEXT: [[TMP34:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
+// CHECK19-NEXT: store i8* null, i8** [[TMP34]], align 4
+// CHECK19-NEXT: [[TMP35:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
+// CHECK19-NEXT: [[TMP36:%.*]] = bitcast i8** [[TMP35]] to i32*
+// CHECK19-NEXT: store i32 [[TMP1]], i32* [[TMP36]], align 4
+// CHECK19-NEXT: [[TMP37:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
+// CHECK19-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i32*
+// CHECK19-NEXT: store i32 [[TMP1]], i32* [[TMP38]], align 4
+// CHECK19-NEXT: [[TMP39:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3
+// CHECK19-NEXT: store i8* null, i8** [[TMP39]], align 4
+// CHECK19-NEXT: [[TMP40:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4
// CHECK19-NEXT: [[TMP41:%.*]] = bitcast i8** [[TMP40]] to i32*
-// CHECK19-NEXT: store i32 [[TMP1]], i32* [[TMP41]], align 4
-// CHECK19-NEXT: [[TMP42:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3
-// CHECK19-NEXT: store i64 4, i64* [[TMP42]], align 4
-// CHECK19-NEXT: [[TMP43:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3
-// CHECK19-NEXT: store i8* null, i8** [[TMP43]], align 4
-// CHECK19-NEXT: [[TMP44:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4
-// CHECK19-NEXT: [[TMP45:%.*]] = bitcast i8** [[TMP44]] to i32*
-// CHECK19-NEXT: store i32 [[TMP2]], i32* [[TMP45]], align 4
-// CHECK19-NEXT: [[TMP46:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4
-// CHECK19-NEXT: [[TMP47:%.*]] = bitcast i8** [[TMP46]] to i32*
-// CHECK19-NEXT: store i32 [[TMP2]], i32* [[TMP47]], align 4
-// CHECK19-NEXT: [[TMP48:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4
-// CHECK19-NEXT: store i64 4, i64* [[TMP48]], align 4
-// CHECK19-NEXT: [[TMP49:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4
+// CHECK19-NEXT: store i32 [[TMP2]], i32* [[TMP41]], align 4
+// CHECK19-NEXT: [[TMP42:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4
+// CHECK19-NEXT: [[TMP43:%.*]] = bitcast i8** [[TMP42]] to i32*
+// CHECK19-NEXT: store i32 [[TMP2]], i32* [[TMP43]], align 4
+// CHECK19-NEXT: [[TMP44:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4
+// CHECK19-NEXT: store i8* null, i8** [[TMP44]], align 4
+// CHECK19-NEXT: [[TMP45:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 5
+// CHECK19-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to double**
+// CHECK19-NEXT: store double* [[VLA]], double** [[TMP46]], align 4
+// CHECK19-NEXT: [[TMP47:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 5
+// CHECK19-NEXT: [[TMP48:%.*]] = bitcast i8** [[TMP47]] to double**
+// CHECK19-NEXT: store double* [[VLA]], double** [[TMP48]], align 4
+// CHECK19-NEXT: store i64 [[TMP12]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_sizes.2, i32 0, i32 5), align 4
+// CHECK19-NEXT: [[TMP49:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 5
// CHECK19-NEXT: store i8* null, i8** [[TMP49]], align 4
-// CHECK19-NEXT: [[TMP50:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 5
-// CHECK19-NEXT: [[TMP51:%.*]] = bitcast i8** [[TMP50]] to double**
-// CHECK19-NEXT: store double* [[VLA]], double** [[TMP51]], align 4
-// CHECK19-NEXT: [[TMP52:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 5
-// CHECK19-NEXT: [[TMP53:%.*]] = bitcast i8** [[TMP52]] to double**
-// CHECK19-NEXT: store double* [[VLA]], double** [[TMP53]], align 4
-// CHECK19-NEXT: [[TMP54:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5
-// CHECK19-NEXT: store i64 [[TMP12]], i64* [[TMP54]], align 4
-// CHECK19-NEXT: [[TMP55:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 5
-// CHECK19-NEXT: store i8* null, i8** [[TMP55]], align 4
-// CHECK19-NEXT: [[TMP56:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 6
-// CHECK19-NEXT: [[TMP57:%.*]] = bitcast i8** [[TMP56]] to %struct.St**
-// CHECK19-NEXT: store %struct.St* [[THIS1]], %struct.St** [[TMP57]], align 4
-// CHECK19-NEXT: [[TMP58:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 6
-// CHECK19-NEXT: [[TMP59:%.*]] = bitcast i8** [[TMP58]] to i32**
-// CHECK19-NEXT: store i32* [[A3]], i32** [[TMP59]], align 4
-// CHECK19-NEXT: [[TMP60:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6
-// CHECK19-NEXT: store i64 [[TMP19]], i64* [[TMP60]], align 4
-// CHECK19-NEXT: [[TMP61:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 6
-// CHECK19-NEXT: store i8* null, i8** [[TMP61]], align 4
-// CHECK19-NEXT: [[TMP62:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 7
-// CHECK19-NEXT: [[TMP63:%.*]] = bitcast i8** [[TMP62]] to %struct.St**
-// CHECK19-NEXT: store %struct.St* [[THIS1]], %struct.St** [[TMP63]], align 4
-// CHECK19-NEXT: [[TMP64:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 7
-// CHECK19-NEXT: [[TMP65:%.*]] = bitcast i8** [[TMP64]] to i32**
-// CHECK19-NEXT: store i32* [[B2]], i32** [[TMP65]], align 4
-// CHECK19-NEXT: [[TMP66:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7
-// CHECK19-NEXT: store i64 4, i64* [[TMP66]], align 4
-// CHECK19-NEXT: [[TMP67:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 7
-// CHECK19-NEXT: store i8* null, i8** [[TMP67]], align 4
-// CHECK19-NEXT: [[TMP68:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 8
-// CHECK19-NEXT: [[TMP69:%.*]] = bitcast i8** [[TMP68]] to %struct.St**
-// CHECK19-NEXT: store %struct.St* [[THIS1]], %struct.St** [[TMP69]], align 4
-// CHECK19-NEXT: [[TMP70:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 8
-// CHECK19-NEXT: [[TMP71:%.*]] = bitcast i8** [[TMP70]] to i32**
-// CHECK19-NEXT: store i32* [[A3]], i32** [[TMP71]], align 4
-// CHECK19-NEXT: [[TMP72:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 8
-// CHECK19-NEXT: store i64 4, i64* [[TMP72]], align 4
-// CHECK19-NEXT: [[TMP73:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 8
-// CHECK19-NEXT: store i8* null, i8** [[TMP73]], align 4
-// CHECK19-NEXT: [[TMP74:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 9
-// CHECK19-NEXT: [[TMP75:%.*]] = bitcast i8** [[TMP74]] to i32*
-// CHECK19-NEXT: store i32 [[TMP9]], i32* [[TMP75]], align 4
-// CHECK19-NEXT: [[TMP76:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 9
-// CHECK19-NEXT: [[TMP77:%.*]] = bitcast i8** [[TMP76]] to i32*
-// CHECK19-NEXT: store i32 [[TMP9]], i32* [[TMP77]], align 4
-// CHECK19-NEXT: [[TMP78:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 9
-// CHECK19-NEXT: store i64 4, i64* [[TMP78]], align 4
-// CHECK19-NEXT: [[TMP79:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 9
-// CHECK19-NEXT: store i8* null, i8** [[TMP79]], align 4
-// CHECK19-NEXT: [[TMP80:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
-// CHECK19-NEXT: [[TMP81:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
-// CHECK19-NEXT: [[TMP82:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
-// CHECK19-NEXT: [[TMP83:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2St7St_funcEPS_iPe_l144.region_id, i32 10, i8** [[TMP80]], i8** [[TMP81]], i64* [[TMP82]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_maptypes.2, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
-// CHECK19-NEXT: [[TMP84:%.*]] = icmp ne i32 [[TMP83]], 0
-// CHECK19-NEXT: br i1 [[TMP84]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
+// CHECK19-NEXT: [[TMP50:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 6
+// CHECK19-NEXT: [[TMP51:%.*]] = bitcast i8** [[TMP50]] to %struct.St**
+// CHECK19-NEXT: store %struct.St* [[THIS1]], %struct.St** [[TMP51]], align 4
+// CHECK19-NEXT: [[TMP52:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 6
+// CHECK19-NEXT: [[TMP53:%.*]] = bitcast i8** [[TMP52]] to i32**
+// CHECK19-NEXT: store i32* [[A3]], i32** [[TMP53]], align 4
+// CHECK19-NEXT: store i64 [[TMP19]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_sizes.2, i32 0, i32 6), align 4
+// CHECK19-NEXT: [[TMP54:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 6
+// CHECK19-NEXT: store i8* null, i8** [[TMP54]], align 4
+// CHECK19-NEXT: [[TMP55:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 7
+// CHECK19-NEXT: [[TMP56:%.*]] = bitcast i8** [[TMP55]] to %struct.St**
+// CHECK19-NEXT: store %struct.St* [[THIS1]], %struct.St** [[TMP56]], align 4
+// CHECK19-NEXT: [[TMP57:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 7
+// CHECK19-NEXT: [[TMP58:%.*]] = bitcast i8** [[TMP57]] to i32**
+// CHECK19-NEXT: store i32* [[B2]], i32** [[TMP58]], align 4
+// CHECK19-NEXT: [[TMP59:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 7
+// CHECK19-NEXT: store i8* null, i8** [[TMP59]], align 4
+// CHECK19-NEXT: [[TMP60:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 8
+// CHECK19-NEXT: [[TMP61:%.*]] = bitcast i8** [[TMP60]] to %struct.St**
+// CHECK19-NEXT: store %struct.St* [[THIS1]], %struct.St** [[TMP61]], align 4
+// CHECK19-NEXT: [[TMP62:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 8
+// CHECK19-NEXT: [[TMP63:%.*]] = bitcast i8** [[TMP62]] to i32**
+// CHECK19-NEXT: store i32* [[A3]], i32** [[TMP63]], align 4
+// CHECK19-NEXT: [[TMP64:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 8
+// CHECK19-NEXT: store i8* null, i8** [[TMP64]], align 4
+// CHECK19-NEXT: [[TMP65:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 9
+// CHECK19-NEXT: [[TMP66:%.*]] = bitcast i8** [[TMP65]] to i32*
+// CHECK19-NEXT: store i32 [[TMP9]], i32* [[TMP66]], align 4
+// CHECK19-NEXT: [[TMP67:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 9
+// CHECK19-NEXT: [[TMP68:%.*]] = bitcast i8** [[TMP67]] to i32*
+// CHECK19-NEXT: store i32 [[TMP9]], i32* [[TMP68]], align 4
+// CHECK19-NEXT: [[TMP69:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 9
+// CHECK19-NEXT: store i8* null, i8** [[TMP69]], align 4
+// CHECK19-NEXT: [[TMP70:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
+// CHECK19-NEXT: [[TMP71:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
+// CHECK19-NEXT: [[TMP72:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2St7St_funcEPS_iPe_l144.region_id, i32 10, i8** [[TMP70]], i8** [[TMP71]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_sizes.2, i32 0, i32 0), i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_maptypes.3, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
+// CHECK19-NEXT: [[TMP73:%.*]] = icmp ne i32 [[TMP72]], 0
+// CHECK19-NEXT: br i1 [[TMP73]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK19: omp_offload.failed:
// CHECK19-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2St7St_funcEPS_iPe_l144(%struct.St* [[TMP6]], i32 [[TMP0]], x86_fp80* [[TMP7]], i32 [[TMP1]], i32 [[TMP2]], double* [[VLA]], %struct.St* [[THIS1]], i32 [[TMP9]]) #[[ATTR4]]
// CHECK19-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK19: omp_offload.cont:
-// CHECK19-NEXT: [[TMP85:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
-// CHECK19-NEXT: call void @llvm.stackrestore(i8* [[TMP85]])
+// CHECK19-NEXT: [[TMP74:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
+// CHECK19-NEXT: call void @llvm.stackrestore(i8* [[TMP74]])
// CHECK19-NEXT: ret void
//
//
@@ -4791,7 +4680,6 @@
// CHECK20-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [8 x i8*], align 4
// CHECK20-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [8 x i8*], align 4
// CHECK20-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [8 x i8*], align 4
-// CHECK20-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [8 x i64], align 4
// CHECK20-NEXT: store float* [[A]], float** [[A_ADDR]], align 4
// CHECK20-NEXT: store %struct.St* [[S]], %struct.St** [[S_ADDR]], align 4
// CHECK20-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
@@ -4820,92 +4708,76 @@
// CHECK20-NEXT: [[TMP15:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK20-NEXT: [[TMP16:%.*]] = bitcast i8** [[TMP15]] to float**
// CHECK20-NEXT: store float* [[TMP5]], float** [[TMP16]], align 4
-// CHECK20-NEXT: [[TMP17:%.*]] = getelementptr inbounds [8 x i64], [8 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
-// CHECK20-NEXT: store i64 0, i64* [[TMP17]], align 4
-// CHECK20-NEXT: [[TMP18:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
-// CHECK20-NEXT: store i8* null, i8** [[TMP18]], align 4
-// CHECK20-NEXT: [[TMP19:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
-// CHECK20-NEXT: [[TMP20:%.*]] = bitcast i8** [[TMP19]] to %struct.St**
-// CHECK20-NEXT: store %struct.St* [[TMP6]], %struct.St** [[TMP20]], align 4
-// CHECK20-NEXT: [[TMP21:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
-// CHECK20-NEXT: [[TMP22:%.*]] = bitcast i8** [[TMP21]] to %struct.St**
-// CHECK20-NEXT: store %struct.St* [[TMP6]], %struct.St** [[TMP22]], align 4
-// CHECK20-NEXT: [[TMP23:%.*]] = getelementptr inbounds [8 x i64], [8 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1
-// CHECK20-NEXT: store i64 0, i64* [[TMP23]], align 4
-// CHECK20-NEXT: [[TMP24:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
-// CHECK20-NEXT: store i8* null, i8** [[TMP24]], align 4
-// CHECK20-NEXT: [[TMP25:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
+// CHECK20-NEXT: [[TMP17:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
+// CHECK20-NEXT: store i8* null, i8** [[TMP17]], align 4
+// CHECK20-NEXT: [[TMP18:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
+// CHECK20-NEXT: [[TMP19:%.*]] = bitcast i8** [[TMP18]] to %struct.St**
+// CHECK20-NEXT: store %struct.St* [[TMP6]], %struct.St** [[TMP19]], align 4
+// CHECK20-NEXT: [[TMP20:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
+// CHECK20-NEXT: [[TMP21:%.*]] = bitcast i8** [[TMP20]] to %struct.St**
+// CHECK20-NEXT: store %struct.St* [[TMP6]], %struct.St** [[TMP21]], align 4
+// CHECK20-NEXT: [[TMP22:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
+// CHECK20-NEXT: store i8* null, i8** [[TMP22]], align 4
+// CHECK20-NEXT: [[TMP23:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
+// CHECK20-NEXT: [[TMP24:%.*]] = bitcast i8** [[TMP23]] to i32*
+// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP24]], align 4
+// CHECK20-NEXT: [[TMP25:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK20-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i32*
// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP26]], align 4
-// CHECK20-NEXT: [[TMP27:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
-// CHECK20-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i32*
-// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP28]], align 4
-// CHECK20-NEXT: [[TMP29:%.*]] = getelementptr inbounds [8 x i64], [8 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2
-// CHECK20-NEXT: store i64 4, i64* [[TMP29]], align 4
-// CHECK20-NEXT: [[TMP30:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
-// CHECK20-NEXT: store i8* null, i8** [[TMP30]], align 4
-// CHECK20-NEXT: [[TMP31:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
-// CHECK20-NEXT: [[TMP32:%.*]] = bitcast i8** [[TMP31]] to x86_fp80**
-// CHECK20-NEXT: store x86_fp80* [[TMP7]], x86_fp80** [[TMP32]], align 4
-// CHECK20-NEXT: [[TMP33:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
-// CHECK20-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to x86_fp80**
-// CHECK20-NEXT: store x86_fp80* [[TMP7]], x86_fp80** [[TMP34]], align 4
-// CHECK20-NEXT: [[TMP35:%.*]] = getelementptr inbounds [8 x i64], [8 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3
-// CHECK20-NEXT: store i64 0, i64* [[TMP35]], align 4
-// CHECK20-NEXT: [[TMP36:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3
-// CHECK20-NEXT: store i8* null, i8** [[TMP36]], align 4
-// CHECK20-NEXT: [[TMP37:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4
-// CHECK20-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i32*
-// CHECK20-NEXT: store i32 [[TMP1]], i32* [[TMP38]], align 4
-// CHECK20-NEXT: [[TMP39:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4
-// CHECK20-NEXT: [[TMP40:%.*]] = bitcast i8** [[TMP39]] to i32*
-// CHECK20-NEXT: store i32 [[TMP1]], i32* [[TMP40]], align 4
-// CHECK20-NEXT: [[TMP41:%.*]] = getelementptr inbounds [8 x i64], [8 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4
-// CHECK20-NEXT: store i64 4, i64* [[TMP41]], align 4
-// CHECK20-NEXT: [[TMP42:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4
+// CHECK20-NEXT: [[TMP27:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
+// CHECK20-NEXT: store i8* null, i8** [[TMP27]], align 4
+// CHECK20-NEXT: [[TMP28:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
+// CHECK20-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to x86_fp80**
+// CHECK20-NEXT: store x86_fp80* [[TMP7]], x86_fp80** [[TMP29]], align 4
+// CHECK20-NEXT: [[TMP30:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
+// CHECK20-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to x86_fp80**
+// CHECK20-NEXT: store x86_fp80* [[TMP7]], x86_fp80** [[TMP31]], align 4
+// CHECK20-NEXT: [[TMP32:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3
+// CHECK20-NEXT: store i8* null, i8** [[TMP32]], align 4
+// CHECK20-NEXT: [[TMP33:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4
+// CHECK20-NEXT: [[TMP34:%.*]] = bitcast i8** [[TMP33]] to i32*
+// CHECK20-NEXT: store i32 [[TMP1]], i32* [[TMP34]], align 4
+// CHECK20-NEXT: [[TMP35:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4
+// CHECK20-NEXT: [[TMP36:%.*]] = bitcast i8** [[TMP35]] to i32*
+// CHECK20-NEXT: store i32 [[TMP1]], i32* [[TMP36]], align 4
+// CHECK20-NEXT: [[TMP37:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4
+// CHECK20-NEXT: store i8* null, i8** [[TMP37]], align 4
+// CHECK20-NEXT: [[TMP38:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 5
+// CHECK20-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i32*
+// CHECK20-NEXT: store i32 [[TMP2]], i32* [[TMP39]], align 4
+// CHECK20-NEXT: [[TMP40:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 5
+// CHECK20-NEXT: [[TMP41:%.*]] = bitcast i8** [[TMP40]] to i32*
+// CHECK20-NEXT: store i32 [[TMP2]], i32* [[TMP41]], align 4
+// CHECK20-NEXT: [[TMP42:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 5
// CHECK20-NEXT: store i8* null, i8** [[TMP42]], align 4
-// CHECK20-NEXT: [[TMP43:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 5
-// CHECK20-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to i32*
-// CHECK20-NEXT: store i32 [[TMP2]], i32* [[TMP44]], align 4
-// CHECK20-NEXT: [[TMP45:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 5
-// CHECK20-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to i32*
-// CHECK20-NEXT: store i32 [[TMP2]], i32* [[TMP46]], align 4
-// CHECK20-NEXT: [[TMP47:%.*]] = getelementptr inbounds [8 x i64], [8 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5
-// CHECK20-NEXT: store i64 4, i64* [[TMP47]], align 4
-// CHECK20-NEXT: [[TMP48:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 5
-// CHECK20-NEXT: store i8* null, i8** [[TMP48]], align 4
-// CHECK20-NEXT: [[TMP49:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 6
-// CHECK20-NEXT: [[TMP50:%.*]] = bitcast i8** [[TMP49]] to double**
-// CHECK20-NEXT: store double* [[VLA]], double** [[TMP50]], align 4
-// CHECK20-NEXT: [[TMP51:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 6
-// CHECK20-NEXT: [[TMP52:%.*]] = bitcast i8** [[TMP51]] to double**
-// CHECK20-NEXT: store double* [[VLA]], double** [[TMP52]], align 4
-// CHECK20-NEXT: [[TMP53:%.*]] = getelementptr inbounds [8 x i64], [8 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6
-// CHECK20-NEXT: store i64 [[TMP12]], i64* [[TMP53]], align 4
-// CHECK20-NEXT: [[TMP54:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 6
-// CHECK20-NEXT: store i8* null, i8** [[TMP54]], align 4
-// CHECK20-NEXT: [[TMP55:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 7
-// CHECK20-NEXT: [[TMP56:%.*]] = bitcast i8** [[TMP55]] to i32*
-// CHECK20-NEXT: store i32 [[TMP9]], i32* [[TMP56]], align 4
-// CHECK20-NEXT: [[TMP57:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 7
-// CHECK20-NEXT: [[TMP58:%.*]] = bitcast i8** [[TMP57]] to i32*
-// CHECK20-NEXT: store i32 [[TMP9]], i32* [[TMP58]], align 4
-// CHECK20-NEXT: [[TMP59:%.*]] = getelementptr inbounds [8 x i64], [8 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7
-// CHECK20-NEXT: store i64 4, i64* [[TMP59]], align 4
-// CHECK20-NEXT: [[TMP60:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 7
-// CHECK20-NEXT: store i8* null, i8** [[TMP60]], align 4
-// CHECK20-NEXT: [[TMP61:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
-// CHECK20-NEXT: [[TMP62:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
-// CHECK20-NEXT: [[TMP63:%.*]] = getelementptr inbounds [8 x i64], [8 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
-// CHECK20-NEXT: [[TMP64:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1:[0-9]+]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z10array_funcPfP2StiPe_l152.region_id, i32 8, i8** [[TMP61]], i8** [[TMP62]], i64* [[TMP63]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
-// CHECK20-NEXT: [[TMP65:%.*]] = icmp ne i32 [[TMP64]], 0
-// CHECK20-NEXT: br i1 [[TMP65]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
+// CHECK20-NEXT: [[TMP43:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 6
+// CHECK20-NEXT: [[TMP44:%.*]] = bitcast i8** [[TMP43]] to double**
+// CHECK20-NEXT: store double* [[VLA]], double** [[TMP44]], align 4
+// CHECK20-NEXT: [[TMP45:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 6
+// CHECK20-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to double**
+// CHECK20-NEXT: store double* [[VLA]], double** [[TMP46]], align 4
+// CHECK20-NEXT: store i64 [[TMP12]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @.offload_sizes, i32 0, i32 6), align 4
+// CHECK20-NEXT: [[TMP47:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 6
+// CHECK20-NEXT: store i8* null, i8** [[TMP47]], align 4
+// CHECK20-NEXT: [[TMP48:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 7
+// CHECK20-NEXT: [[TMP49:%.*]] = bitcast i8** [[TMP48]] to i32*
+// CHECK20-NEXT: store i32 [[TMP9]], i32* [[TMP49]], align 4
+// CHECK20-NEXT: [[TMP50:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 7
+// CHECK20-NEXT: [[TMP51:%.*]] = bitcast i8** [[TMP50]] to i32*
+// CHECK20-NEXT: store i32 [[TMP9]], i32* [[TMP51]], align 4
+// CHECK20-NEXT: [[TMP52:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 7
+// CHECK20-NEXT: store i8* null, i8** [[TMP52]], align 4
+// CHECK20-NEXT: [[TMP53:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
+// CHECK20-NEXT: [[TMP54:%.*]] = getelementptr inbounds [8 x i8*], [8 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
+// CHECK20-NEXT: [[TMP55:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1:[0-9]+]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z10array_funcPfP2StiPe_l152.region_id, i32 8, i8** [[TMP53]], i8** [[TMP54]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @.offload_sizes, i32 0, i32 0), i64* getelementptr inbounds ([8 x i64], [8 x i64]* @.offload_maptypes, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
+// CHECK20-NEXT: [[TMP56:%.*]] = icmp ne i32 [[TMP55]], 0
+// CHECK20-NEXT: br i1 [[TMP56]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK20: omp_offload.failed:
// CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__Z10array_funcPfP2StiPe_l152(float* [[TMP5]], %struct.St* [[TMP6]], i32 [[TMP0]], x86_fp80* [[TMP7]], i32 [[TMP1]], i32 [[TMP2]], double* [[VLA]], i32 [[TMP9]]) #[[ATTR4:[0-9]+]]
// CHECK20-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK20: omp_offload.cont:
-// CHECK20-NEXT: [[TMP66:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
-// CHECK20-NEXT: call void @llvm.stackrestore(i8* [[TMP66]])
+// CHECK20-NEXT: [[TMP57:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
+// CHECK20-NEXT: call void @llvm.stackrestore(i8* [[TMP57]])
// CHECK20-NEXT: ret void
//
//
@@ -5006,7 +4878,6 @@
// CHECK20-NEXT: [[DOTOFFLOAD_BASEPTRS:%.*]] = alloca [10 x i8*], align 4
// CHECK20-NEXT: [[DOTOFFLOAD_PTRS:%.*]] = alloca [10 x i8*], align 4
// CHECK20-NEXT: [[DOTOFFLOAD_MAPPERS:%.*]] = alloca [10 x i8*], align 4
-// CHECK20-NEXT: [[DOTOFFLOAD_SIZES:%.*]] = alloca [10 x i64], align 4
// CHECK20-NEXT: store %struct.St* [[THIS]], %struct.St** [[THIS_ADDR]], align 4
// CHECK20-NEXT: store %struct.St* [[S]], %struct.St** [[S_ADDR]], align 4
// CHECK20-NEXT: store i32 [[N]], i32* [[N_ADDR]], align 4
@@ -5048,112 +4919,93 @@
// CHECK20-NEXT: [[TMP22:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
// CHECK20-NEXT: [[TMP23:%.*]] = bitcast i8** [[TMP22]] to %struct.St**
// CHECK20-NEXT: store %struct.St* [[TMP6]], %struct.St** [[TMP23]], align 4
-// CHECK20-NEXT: [[TMP24:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
-// CHECK20-NEXT: store i64 0, i64* [[TMP24]], align 4
-// CHECK20-NEXT: [[TMP25:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
-// CHECK20-NEXT: store i8* null, i8** [[TMP25]], align 4
-// CHECK20-NEXT: [[TMP26:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
-// CHECK20-NEXT: [[TMP27:%.*]] = bitcast i8** [[TMP26]] to i32*
-// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP27]], align 4
-// CHECK20-NEXT: [[TMP28:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
-// CHECK20-NEXT: [[TMP29:%.*]] = bitcast i8** [[TMP28]] to i32*
-// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP29]], align 4
-// CHECK20-NEXT: [[TMP30:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 1
-// CHECK20-NEXT: store i64 4, i64* [[TMP30]], align 4
-// CHECK20-NEXT: [[TMP31:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
-// CHECK20-NEXT: store i8* null, i8** [[TMP31]], align 4
-// CHECK20-NEXT: [[TMP32:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
+// CHECK20-NEXT: [[TMP24:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 0
+// CHECK20-NEXT: store i8* null, i8** [[TMP24]], align 4
+// CHECK20-NEXT: [[TMP25:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 1
+// CHECK20-NEXT: [[TMP26:%.*]] = bitcast i8** [[TMP25]] to i32*
+// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP26]], align 4
+// CHECK20-NEXT: [[TMP27:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 1
+// CHECK20-NEXT: [[TMP28:%.*]] = bitcast i8** [[TMP27]] to i32*
+// CHECK20-NEXT: store i32 [[TMP0]], i32* [[TMP28]], align 4
+// CHECK20-NEXT: [[TMP29:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 1
+// CHECK20-NEXT: store i8* null, i8** [[TMP29]], align 4
+// CHECK20-NEXT: [[TMP30:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 2
+// CHECK20-NEXT: [[TMP31:%.*]] = bitcast i8** [[TMP30]] to x86_fp80**
+// CHECK20-NEXT: store x86_fp80* [[TMP7]], x86_fp80** [[TMP31]], align 4
+// CHECK20-NEXT: [[TMP32:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
// CHECK20-NEXT: [[TMP33:%.*]] = bitcast i8** [[TMP32]] to x86_fp80**
// CHECK20-NEXT: store x86_fp80* [[TMP7]], x86_fp80** [[TMP33]], align 4
-// CHECK20-NEXT: [[TMP34:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 2
-// CHECK20-NEXT: [[TMP35:%.*]] = bitcast i8** [[TMP34]] to x86_fp80**
-// CHECK20-NEXT: store x86_fp80* [[TMP7]], x86_fp80** [[TMP35]], align 4
-// CHECK20-NEXT: [[TMP36:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 2
-// CHECK20-NEXT: store i64 0, i64* [[TMP36]], align 4
-// CHECK20-NEXT: [[TMP37:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
-// CHECK20-NEXT: store i8* null, i8** [[TMP37]], align 4
-// CHECK20-NEXT: [[TMP38:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
-// CHECK20-NEXT: [[TMP39:%.*]] = bitcast i8** [[TMP38]] to i32*
-// CHECK20-NEXT: store i32 [[TMP1]], i32* [[TMP39]], align 4
-// CHECK20-NEXT: [[TMP40:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
+// CHECK20-NEXT: [[TMP34:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 2
+// CHECK20-NEXT: store i8* null, i8** [[TMP34]], align 4
+// CHECK20-NEXT: [[TMP35:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 3
+// CHECK20-NEXT: [[TMP36:%.*]] = bitcast i8** [[TMP35]] to i32*
+// CHECK20-NEXT: store i32 [[TMP1]], i32* [[TMP36]], align 4
+// CHECK20-NEXT: [[TMP37:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 3
+// CHECK20-NEXT: [[TMP38:%.*]] = bitcast i8** [[TMP37]] to i32*
+// CHECK20-NEXT: store i32 [[TMP1]], i32* [[TMP38]], align 4
+// CHECK20-NEXT: [[TMP39:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3
+// CHECK20-NEXT: store i8* null, i8** [[TMP39]], align 4
+// CHECK20-NEXT: [[TMP40:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4
// CHECK20-NEXT: [[TMP41:%.*]] = bitcast i8** [[TMP40]] to i32*
-// CHECK20-NEXT: store i32 [[TMP1]], i32* [[TMP41]], align 4
-// CHECK20-NEXT: [[TMP42:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 3
-// CHECK20-NEXT: store i64 4, i64* [[TMP42]], align 4
-// CHECK20-NEXT: [[TMP43:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 3
-// CHECK20-NEXT: store i8* null, i8** [[TMP43]], align 4
-// CHECK20-NEXT: [[TMP44:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 4
-// CHECK20-NEXT: [[TMP45:%.*]] = bitcast i8** [[TMP44]] to i32*
-// CHECK20-NEXT: store i32 [[TMP2]], i32* [[TMP45]], align 4
-// CHECK20-NEXT: [[TMP46:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4
-// CHECK20-NEXT: [[TMP47:%.*]] = bitcast i8** [[TMP46]] to i32*
-// CHECK20-NEXT: store i32 [[TMP2]], i32* [[TMP47]], align 4
-// CHECK20-NEXT: [[TMP48:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 4
-// CHECK20-NEXT: store i64 4, i64* [[TMP48]], align 4
-// CHECK20-NEXT: [[TMP49:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4
+// CHECK20-NEXT: store i32 [[TMP2]], i32* [[TMP41]], align 4
+// CHECK20-NEXT: [[TMP42:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 4
+// CHECK20-NEXT: [[TMP43:%.*]] = bitcast i8** [[TMP42]] to i32*
+// CHECK20-NEXT: store i32 [[TMP2]], i32* [[TMP43]], align 4
+// CHECK20-NEXT: [[TMP44:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 4
+// CHECK20-NEXT: store i8* null, i8** [[TMP44]], align 4
+// CHECK20-NEXT: [[TMP45:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 5
+// CHECK20-NEXT: [[TMP46:%.*]] = bitcast i8** [[TMP45]] to double**
+// CHECK20-NEXT: store double* [[VLA]], double** [[TMP46]], align 4
+// CHECK20-NEXT: [[TMP47:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 5
+// CHECK20-NEXT: [[TMP48:%.*]] = bitcast i8** [[TMP47]] to double**
+// CHECK20-NEXT: store double* [[VLA]], double** [[TMP48]], align 4
+// CHECK20-NEXT: store i64 [[TMP12]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_sizes.2, i32 0, i32 5), align 4
+// CHECK20-NEXT: [[TMP49:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 5
// CHECK20-NEXT: store i8* null, i8** [[TMP49]], align 4
-// CHECK20-NEXT: [[TMP50:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 5
-// CHECK20-NEXT: [[TMP51:%.*]] = bitcast i8** [[TMP50]] to double**
-// CHECK20-NEXT: store double* [[VLA]], double** [[TMP51]], align 4
-// CHECK20-NEXT: [[TMP52:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 5
-// CHECK20-NEXT: [[TMP53:%.*]] = bitcast i8** [[TMP52]] to double**
-// CHECK20-NEXT: store double* [[VLA]], double** [[TMP53]], align 4
-// CHECK20-NEXT: [[TMP54:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 5
-// CHECK20-NEXT: store i64 [[TMP12]], i64* [[TMP54]], align 4
-// CHECK20-NEXT: [[TMP55:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 5
-// CHECK20-NEXT: store i8* null, i8** [[TMP55]], align 4
-// CHECK20-NEXT: [[TMP56:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 6
-// CHECK20-NEXT: [[TMP57:%.*]] = bitcast i8** [[TMP56]] to %struct.St**
-// CHECK20-NEXT: store %struct.St* [[THIS1]], %struct.St** [[TMP57]], align 4
-// CHECK20-NEXT: [[TMP58:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 6
-// CHECK20-NEXT: [[TMP59:%.*]] = bitcast i8** [[TMP58]] to i32**
-// CHECK20-NEXT: store i32* [[A3]], i32** [[TMP59]], align 4
-// CHECK20-NEXT: [[TMP60:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 6
-// CHECK20-NEXT: store i64 [[TMP19]], i64* [[TMP60]], align 4
-// CHECK20-NEXT: [[TMP61:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 6
-// CHECK20-NEXT: store i8* null, i8** [[TMP61]], align 4
-// CHECK20-NEXT: [[TMP62:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 7
-// CHECK20-NEXT: [[TMP63:%.*]] = bitcast i8** [[TMP62]] to %struct.St**
-// CHECK20-NEXT: store %struct.St* [[THIS1]], %struct.St** [[TMP63]], align 4
-// CHECK20-NEXT: [[TMP64:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 7
-// CHECK20-NEXT: [[TMP65:%.*]] = bitcast i8** [[TMP64]] to i32**
-// CHECK20-NEXT: store i32* [[B2]], i32** [[TMP65]], align 4
-// CHECK20-NEXT: [[TMP66:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 7
-// CHECK20-NEXT: store i64 4, i64* [[TMP66]], align 4
-// CHECK20-NEXT: [[TMP67:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 7
-// CHECK20-NEXT: store i8* null, i8** [[TMP67]], align 4
-// CHECK20-NEXT: [[TMP68:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 8
-// CHECK20-NEXT: [[TMP69:%.*]] = bitcast i8** [[TMP68]] to %struct.St**
-// CHECK20-NEXT: store %struct.St* [[THIS1]], %struct.St** [[TMP69]], align 4
-// CHECK20-NEXT: [[TMP70:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 8
-// CHECK20-NEXT: [[TMP71:%.*]] = bitcast i8** [[TMP70]] to i32**
-// CHECK20-NEXT: store i32* [[A3]], i32** [[TMP71]], align 4
-// CHECK20-NEXT: [[TMP72:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 8
-// CHECK20-NEXT: store i64 4, i64* [[TMP72]], align 4
-// CHECK20-NEXT: [[TMP73:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 8
-// CHECK20-NEXT: store i8* null, i8** [[TMP73]], align 4
-// CHECK20-NEXT: [[TMP74:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 9
-// CHECK20-NEXT: [[TMP75:%.*]] = bitcast i8** [[TMP74]] to i32*
-// CHECK20-NEXT: store i32 [[TMP9]], i32* [[TMP75]], align 4
-// CHECK20-NEXT: [[TMP76:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 9
-// CHECK20-NEXT: [[TMP77:%.*]] = bitcast i8** [[TMP76]] to i32*
-// CHECK20-NEXT: store i32 [[TMP9]], i32* [[TMP77]], align 4
-// CHECK20-NEXT: [[TMP78:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 9
-// CHECK20-NEXT: store i64 4, i64* [[TMP78]], align 4
-// CHECK20-NEXT: [[TMP79:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 9
-// CHECK20-NEXT: store i8* null, i8** [[TMP79]], align 4
-// CHECK20-NEXT: [[TMP80:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
-// CHECK20-NEXT: [[TMP81:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
-// CHECK20-NEXT: [[TMP82:%.*]] = getelementptr inbounds [10 x i64], [10 x i64]* [[DOTOFFLOAD_SIZES]], i32 0, i32 0
-// CHECK20-NEXT: [[TMP83:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2St7St_funcEPS_iPe_l144.region_id, i32 10, i8** [[TMP80]], i8** [[TMP81]], i64* [[TMP82]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_maptypes.2, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
-// CHECK20-NEXT: [[TMP84:%.*]] = icmp ne i32 [[TMP83]], 0
-// CHECK20-NEXT: br i1 [[TMP84]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
+// CHECK20-NEXT: [[TMP50:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 6
+// CHECK20-NEXT: [[TMP51:%.*]] = bitcast i8** [[TMP50]] to %struct.St**
+// CHECK20-NEXT: store %struct.St* [[THIS1]], %struct.St** [[TMP51]], align 4
+// CHECK20-NEXT: [[TMP52:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 6
+// CHECK20-NEXT: [[TMP53:%.*]] = bitcast i8** [[TMP52]] to i32**
+// CHECK20-NEXT: store i32* [[A3]], i32** [[TMP53]], align 4
+// CHECK20-NEXT: store i64 [[TMP19]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_sizes.2, i32 0, i32 6), align 4
+// CHECK20-NEXT: [[TMP54:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 6
+// CHECK20-NEXT: store i8* null, i8** [[TMP54]], align 4
+// CHECK20-NEXT: [[TMP55:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 7
+// CHECK20-NEXT: [[TMP56:%.*]] = bitcast i8** [[TMP55]] to %struct.St**
+// CHECK20-NEXT: store %struct.St* [[THIS1]], %struct.St** [[TMP56]], align 4
+// CHECK20-NEXT: [[TMP57:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 7
+// CHECK20-NEXT: [[TMP58:%.*]] = bitcast i8** [[TMP57]] to i32**
+// CHECK20-NEXT: store i32* [[B2]], i32** [[TMP58]], align 4
+// CHECK20-NEXT: [[TMP59:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 7
+// CHECK20-NEXT: store i8* null, i8** [[TMP59]], align 4
+// CHECK20-NEXT: [[TMP60:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 8
+// CHECK20-NEXT: [[TMP61:%.*]] = bitcast i8** [[TMP60]] to %struct.St**
+// CHECK20-NEXT: store %struct.St* [[THIS1]], %struct.St** [[TMP61]], align 4
+// CHECK20-NEXT: [[TMP62:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 8
+// CHECK20-NEXT: [[TMP63:%.*]] = bitcast i8** [[TMP62]] to i32**
+// CHECK20-NEXT: store i32* [[A3]], i32** [[TMP63]], align 4
+// CHECK20-NEXT: [[TMP64:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 8
+// CHECK20-NEXT: store i8* null, i8** [[TMP64]], align 4
+// CHECK20-NEXT: [[TMP65:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 9
+// CHECK20-NEXT: [[TMP66:%.*]] = bitcast i8** [[TMP65]] to i32*
+// CHECK20-NEXT: store i32 [[TMP9]], i32* [[TMP66]], align 4
+// CHECK20-NEXT: [[TMP67:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 9
+// CHECK20-NEXT: [[TMP68:%.*]] = bitcast i8** [[TMP67]] to i32*
+// CHECK20-NEXT: store i32 [[TMP9]], i32* [[TMP68]], align 4
+// CHECK20-NEXT: [[TMP69:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_MAPPERS]], i32 0, i32 9
+// CHECK20-NEXT: store i8* null, i8** [[TMP69]], align 4
+// CHECK20-NEXT: [[TMP70:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_BASEPTRS]], i32 0, i32 0
+// CHECK20-NEXT: [[TMP71:%.*]] = getelementptr inbounds [10 x i8*], [10 x i8*]* [[DOTOFFLOAD_PTRS]], i32 0, i32 0
+// CHECK20-NEXT: [[TMP72:%.*]] = call i32 @__tgt_target_teams_mapper(%struct.ident_t* @[[GLOB1]], i64 -1, i8* @.{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2St7St_funcEPS_iPe_l144.region_id, i32 10, i8** [[TMP70]], i8** [[TMP71]], i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_sizes.2, i32 0, i32 0), i64* getelementptr inbounds ([10 x i64], [10 x i64]* @.offload_maptypes.3, i32 0, i32 0), i8** null, i8** null, i32 0, i32 0)
+// CHECK20-NEXT: [[TMP73:%.*]] = icmp ne i32 [[TMP72]], 0
+// CHECK20-NEXT: br i1 [[TMP73]], label [[OMP_OFFLOAD_FAILED:%.*]], label [[OMP_OFFLOAD_CONT:%.*]]
// CHECK20: omp_offload.failed:
// CHECK20-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}__ZN2St7St_funcEPS_iPe_l144(%struct.St* [[TMP6]], i32 [[TMP0]], x86_fp80* [[TMP7]], i32 [[TMP1]], i32 [[TMP2]], double* [[VLA]], %struct.St* [[THIS1]], i32 [[TMP9]]) #[[ATTR4]]
// CHECK20-NEXT: br label [[OMP_OFFLOAD_CONT]]
// CHECK20: omp_offload.cont:
-// CHECK20-NEXT: [[TMP85:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
-// CHECK20-NEXT: call void @llvm.stackrestore(i8* [[TMP85]])
+// CHECK20-NEXT: [[TMP74:%.*]] = load i8*, i8** [[SAVED_STACK]], align 4
+// CHECK20-NEXT: call void @llvm.stackrestore(i8* [[TMP74]])
// CHECK20-NEXT: ret void
//
//